diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 6bc20230bafe7..c16691ca580e4 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -244,8 +244,9 @@ stages: matrix: api: CI_TARGET: "bazel.api" - api_compat: - CI_TARGET: "bazel.api_compat" + # Disabled due to https://github.com/envoyproxy/envoy/pull/18218 + # api_compat: + # CI_TARGET: "bazel.api_compat" gcc: CI_TARGET: "bazel.gcc" clang_tidy: @@ -381,6 +382,15 @@ stages: GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) displayName: "Generate docs" + - script: | + ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs' + displayName: "Upload Docs to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + condition: eq(variables['Build.SourceBranch'], 'refs/heads/main') + - task: InstallSSHKey@0 inputs: hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" @@ -393,6 +403,7 @@ stages: workingDirectory: $(Build.SourcesDirectory) env: AZP_BRANCH: $(Build.SourceBranch) + NETLIFY_TRIGGER_URL: $(NetlifyTriggerURL) - stage: verify dependsOn: ["docker"] @@ -454,7 +465,7 @@ stages: testRunTitle: "macOS" condition: always() - - script: ./ci/flaky_test/run_process_xml.sh + - script: bazel run //ci/flaky_test:process_xml displayName: "Process Test Results" env: TEST_TMPDIR: $(Build.SourcesDirectory) @@ -501,46 +512,23 @@ stages: artifactName: windows.release condition: always() - - job: clang_cl - timeoutInMinutes: 120 - pool: - vmImage: "windows-latest" - steps: - - task: Cache@2 - inputs: - key: 
'"windows.release" | ./WORKSPACE | **/*.bzl' - path: $(Build.StagingDirectory)/repository_cache - continueOnError: true - - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh - displayName: "Run Windows clang-cl CI" - env: - CI_TARGET: "windows" - ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" - SLACK_TOKEN: $(SLACK_TOKEN) - REPO_URI: $(Build.Repository.Uri) - BUILD_URI: $(Build.BuildUri) - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-clang-cl --jobs=$(RbeJobs) --flaky_test_attempts=2" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - task: PublishTestResults@2 - inputs: - testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml" - testRunTitle: "clang-cl" - searchFolder: $(Build.StagingDirectory)/tmp - condition: always() - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: windows.clang-cl - condition: always() - - job: docker + strategy: + matrix: + windows2019: + imageName: 'windows-latest' + windowsBuildType: "windows" + windowsImageBase: "mcr.microsoft.com/windows/servercore" + windowsImageTag: "ltsc2019" + windows2022: + imageName: 'windows-2022' + windowsBuildType: "windows-ltsc2022" + windowsImageBase: "mcr.microsoft.com/windows/nanoserver" + windowsImageTag: "ltsc2022" dependsOn: ["release"] timeoutInMinutes: 120 pool: - vmImage: "windows-latest" + vmImage: $(imageName) steps: - task: DownloadBuildArtifacts@0 inputs: @@ -561,6 +549,9 @@ stages: AZP_SHA1: $(Build.SourceVersion) DOCKERHUB_USERNAME: $(DockerUsername) DOCKERHUB_PASSWORD: $(DockerPassword) + WINDOWS_BUILD_TYPE: $(windowsBuildType) + WINDOWS_IMAGE_BASE: $(windowsImageBase) + WINDOWS_IMAGE_TAG: $(windowsImageTag) - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/build_images" diff --git a/.bazelrc b/.bazelrc index 
aa0bd78598253..d71b1261bd08c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -167,17 +167,17 @@ build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 build:rbe-toolchain-clang --config=rbe-toolchain -build:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform -build:rbe-toolchain-clang --host_platform=@rbe_ubuntu_clang//config:platform -build:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain -build:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain +build:rbe-toolchain-clang --platforms=@envoy_build_tools//toolchains:rbe_linux_clang_platform +build:rbe-toolchain-clang --host_platform=@envoy_build_tools//toolchains:rbe_linux_clang_platform +build:rbe-toolchain-clang --crosstool_top=@envoy_build_tools//toolchains/configs/linux/clang/cc:toolchain +build:rbe-toolchain-clang --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/clang/config:cc-toolchain build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --config=rbe-toolchain -build:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform -build:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform -build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain -build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain +build:rbe-toolchain-clang-libc++ --platforms=@envoy_build_tools//toolchains:rbe_linux_clang_libcxx_platform +build:rbe-toolchain-clang-libc++ --host_platform=@envoy_build_tools//toolchains:rbe_linux_clang_libcxx_platform +build:rbe-toolchain-clang-libc++ --crosstool_top=@envoy_build_tools//toolchains/configs/linux/clang_libcxx/cc:toolchain +build:rbe-toolchain-clang-libc++ 
--extra_toolchains=@envoy_build_tools//toolchains/configs/linux/clang_libcxx/config:cc-toolchain build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++ @@ -202,20 +202,20 @@ build:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib build:rbe-toolchain-tsan --config=clang-tsan build:rbe-toolchain-gcc --config=rbe-toolchain -build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform -build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform -build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain -build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain +build:rbe-toolchain-gcc --platforms=@envoy_build_tools//toolchains:rbe_linux_gcc_platform +build:rbe-toolchain-gcc --host_platform=@envoy_build_tools//toolchains:rbe_linux_gcc_platform +build:rbe-toolchain-gcc --crosstool_top=@envoy_build_tools//toolchains/configs/linux/gcc/cc:toolchain +build:rbe-toolchain-gcc --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/gcc/config:cc-toolchain -build:rbe-toolchain-msvc-cl --host_platform=@rbe_windows_msvc_cl//config:platform -build:rbe-toolchain-msvc-cl --platforms=@rbe_windows_msvc_cl//config:platform -build:rbe-toolchain-msvc-cl --crosstool_top=@rbe_windows_msvc_cl//cc:toolchain -build:rbe-toolchain-msvc-cl --extra_toolchains=@rbe_windows_msvc_cl//config:cc-toolchain +build:rbe-toolchain-msvc-cl --host_platform=@envoy_build_tools//toolchains:rbe_windows_msvc_cl_platform +build:rbe-toolchain-msvc-cl --platforms=@envoy_build_tools//toolchains:rbe_windows_msvc_cl_platform +build:rbe-toolchain-msvc-cl --crosstool_top=@envoy_build_tools//toolchains/configs/windows/msvc-cl/cc:toolchain +build:rbe-toolchain-msvc-cl 
--extra_toolchains=@envoy_build_tools//toolchains/configs/windows/msvc-cl/config:cc-toolchain -build:rbe-toolchain-clang-cl --host_platform=@rbe_windows_clang_cl//config:platform -build:rbe-toolchain-clang-cl --platforms=@rbe_windows_clang_cl//config:platform -build:rbe-toolchain-clang-cl --crosstool_top=@rbe_windows_clang_cl//cc:toolchain -build:rbe-toolchain-clang-cl --extra_toolchains=@rbe_windows_clang_cl//config:cc-toolchain +build:rbe-toolchain-clang-cl --host_platform=@envoy_build_tools//toolchains:rbe_windows_clang_cl_platform +build:rbe-toolchain-clang-cl --platforms=@envoy_build_tools//toolchains:rbe_windows_clang_cl_platform +build:rbe-toolchain-clang-cl --crosstool_top=@envoy_build_tools//toolchains/configs/windows/clang-cl/cc:toolchain +build:rbe-toolchain-clang-cl --extra_toolchains=@envoy_build_tools//toolchains/configs/windows/clang-cl/config:cc-toolchain build:remote --spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local @@ -265,7 +265,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:55d9e4719d2bd0accce8f829b44dab70cd42112a +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 378a45b4f1af2..8da8671498066 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:55d9e4719d2bd0accce8f829b44dab70cd42112a +FROM gcr.io/envoy-ci/envoy-build:81a93046060dbe5620d5b3aa92632090a9ee4da6 ARG USERNAME=vscode ARG USER_UID=501 diff --git 
a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py index 9c6277ddac81d..de2aba2f6c2b3 100644 --- a/.github/actions/pr_notifier/pr_notifier.py +++ b/.github/actions/pr_notifier/pr_notifier.py @@ -69,6 +69,10 @@ def is_waiting(labels): return False +def is_contrib(labels): + return any(label.name == "contrib" for label in labels) + + # Return true if the PR has an API tag, false otherwise. def is_api(labels): for label in labels: @@ -174,7 +178,7 @@ def track_prs(): pr_info.assignees, maintainers_and_prs, message, MAINTAINERS, FIRST_PASS) # If there was no maintainer, track it as unassigned. - if not has_maintainer_assignee: + if not has_maintainer_assignee and not is_contrib(labels): maintainers_and_prs['unassigned'] = maintainers_and_prs['unassigned'] + message # Return the dict of {maintainers : PR notifications}, diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt index 2fa1aad74b299..fb21f429db9fa 100644 --- a/.github/actions/pr_notifier/requirements.txt +++ b/.github/actions/pr_notifier/requirements.txt @@ -63,9 +63,9 @@ chardet==4.0.0 \ --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 # via requests -deprecated==1.2.12 \ - --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \ - --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1 +deprecated==1.2.13 \ + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d # via pygithub idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ @@ -78,7 +78,7 @@ pycparser==2.20 \ pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ 
--hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r .github/actions/pr_notifier/requirements.txt + # via -r requirements.in pyjwt==2.1.0 \ --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 @@ -111,10 +111,10 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via pynacl -slack-sdk==3.10.1 \ - --hash=sha256:f17b71a578e94204d9033bffded634475f4ca0a6274c6c7a4fd8a9cb0ac7cd8b \ - --hash=sha256:2b4dde7728eb4ff5a581025d204578ccff25a5d8f0fe11ae175e3ce6e074434f - # via -r .github/actions/pr_notifier/requirements.txt +slack_sdk==3.11.2 \ + --hash=sha256:131bf605894525c2d66da064677eabc19f53f02ce0f82a3f2fa130d4ec3bc1b0 \ + --hash=sha256:35245ec34c8549fbb5c43ccc17101afd725b3508bb784da46530b214f496bf93 + # via -r requirements.in urllib3==1.26.6 \ --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4aeb246c9db95..4d2c6b1592534 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -36,21 +36,6 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/tools/deprecate_features" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/deprecate_version" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/ci/flaky_test" - schedule: - interval: "daily" - - package-ecosystem: "docker" directory: "/ci" schedule: diff --git a/.github/workflows/check-deps.yml b/.github/workflows/check-deps.yml new file mode 100644 index 0000000000000..48444f2578101 --- /dev/null +++ b/.github/workflows/check-deps.yml @@ -0,0 +1,34 @@ +name: Check for latest_release of deps + 
+on : + schedule : + - cron : '0 8 * * *' + + workflow_dispatch : + +jobs : + build : + runs-on : ubuntu-latest + steps : + - name : checkout + uses : actions/checkout@v2 + with : + ref : ${{ github.head_ref }} + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install virtualenv + + - name: setting up virtualenv + run : | + export GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} + # --create_issues flag to create issue only in github action + # and not interfere with the CI + ./tools/dependency/release_dates.sh ./bazel/repository_locations.bzl --create_issues + ./tools/dependency/release_dates.sh ./api/bazel/repository_locations.bzl --create_issues diff --git a/BUILD b/BUILD index 9e35562c085fb..747d512e7e9f4 100644 --- a/BUILD +++ b/BUILD @@ -8,6 +8,11 @@ exports_files([ ".coveragerc", ]) +alias( + name = "envoy", + actual = "//source/exe:envoy", +) + # These two definitions exist to help reduce Envoy upstream core code depending on extensions.
# To avoid visibility problems, see notes in source/extensions/extensions_build_config.bzl # diff --git a/CODEOWNERS b/CODEOWNERS index 1cb3aeacdede5..7e2d7dda5390d 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -101,8 +101,6 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/wasm_runtime/ @PiotrSikora @mathetake @lizan # common matcher /*/extensions/common/matcher @mattklein123 @yangminzhu -# common crypto extension -/*/extensions/common/crypto @lizan @bdecoste @asraa /*/extensions/common/proxy_protocol @alyssawilk @wez470 /*/extensions/filters/http/grpc_http1_bridge @snowp @jose /*/extensions/filters/http/gzip @gsagula @dio @@ -110,6 +108,7 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/common/fault @rshriram @alyssawilk /*/extensions/filters/http/grpc_json_transcoder @qiwzhang @lizan /*/extensions/filters/http/router @alyssawilk @mattklein123 @snowp +/*/extensions/filters/common/rbac/matchers @conqerAtapple @ggreenway @alyssawilk /*/extensions/filters/http/grpc_web @fengli79 @lizan /*/extensions/filters/http/grpc_stats @kyessenov @lizan /*/extensions/filters/common/original_src @klarose @snowp @@ -203,3 +202,5 @@ extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp /contrib/mysql_proxy/ @rshriram @venilnoronha /contrib/postgres_proxy/ @fabriziomello @cpakulski @dio /contrib/sxg/ @cpapazian @rgs1 @alyssawilk +/contrib/sip_proxy/ @durd07 @nearbyfly @dorisd0102 +/contrib/cryptomb/ @rojkov @ipuustin diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md index 7ef47bcd6cf13..2efa3f6ddf9c7 100644 --- a/EXTENSION_POLICY.md +++ b/EXTENSION_POLICY.md @@ -92,8 +92,8 @@ The `status` is one of: The extension status may be adjusted by the extension [CODEOWNERS](./CODEOWNERS) and/or Envoy maintainers based on an assessment of the above criteria. Note that the status of the extension reflects the implementation status. 
It is orthogonal to the API stability, for example, an extension -with configuration `envoy.foo.v3alpha.Bar` might have a `stable` implementation and -`envoy.foo.v3.Baz` can have a `wip` implementation. +API marked with `(xds.annotations.v3.file_status).work_in_progress` might have a `stable` implementation and +an extension with a stable config proto can have a `wip` implementation. The `security_posture` is one of: * `robust_to_untrusted_downstream`: The extension is hardened against untrusted downstream traffic. It diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 13342260c7bc1..53659efb67346 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -98,7 +98,8 @@ or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.co * From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch from the tagged release, e.g. "release/v1.6". It will be used for the [stable releases](RELEASES.md#stable-releases). -* Monitor the AZP tag build to make sure that the final docker images get pushed along with +* Tagging will kick off another run of [AZP postsubmit](https://dev.azure.com/cncf/envoy/_build?definitionId=11). Monitor that + tag build to make sure that the final docker images get pushed along with the final docs. The final documentation will end up in the [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/main/docs/envoy). * Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release. @@ -140,7 +141,7 @@ New Features Deprecated ---------- ``` -* Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`) +* Run the deprecate_versions.py script (e.g. `bazel run //tools/deprecate_version:deprecate_version`) to file tracking issues for runtime guarded code which can be removed.
* Check source/common/runtime/runtime_features.cc and see if any runtime guards in disabled_runtime_features should be reassessed, and ping on the relevant issues. diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index 97efd55d01fe0..7fb18f6497f9c 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -102,6 +102,19 @@ you may instead just tag the PR with the issue: \#Issue +### Commit + +If this PR fixes or reverts a buggy commit, please add a line of the form: + +Fixes commit #PR + +or + +Fixes commit SHA + +This will allow automated tools to detect tainted commit ranges on the main branch when the PR is +merged. + ### Deprecated If this PR deprecates existing Envoy APIs or code, it should include an update to the deprecated diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 73e13c82eea39..27f0f9ef9df8a 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -21,5 +21,6 @@ Release Notes: Platform Specific Features: [Optional Runtime guard:] [Optional Fixes #Issue] +[Optional Fixes commit #PR or SHA] [Optional Deprecated:] [Optional [API Considerations](https://github.com/envoyproxy/envoy/blob/main/api/review_checklist.md):] diff --git a/RELEASES.md b/RELEASES.md index c72eeb63805a8..1619b0d22d729 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -72,6 +72,7 @@ deadline of 3 weeks. 
| 1.17.0 | 2020/12/31 | 2021/01/11 | +11 days | 2022/01/11 | | 1.18.0 | 2021/03/31 | 2021/04/15 | +15 days | 2022/04/15 | | 1.19.0 | 2021/06/30 | 2021/07/13 | +13 days | 2022/07/13 | -| 1.20.0 | 2021/09/30 | | | | +| 1.20.0 | 2021/09/30 | 2021/10/05 | +5 days | 2022/10/13 | +| 1.21.0 | 2021/12/30 | | | | [repokitteh]: https://github.com/repokitteh diff --git a/VERSION b/VERSION index 734375f897d07..c6ba48dc6375f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.20.0-dev +1.21.0-dev diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md index f864619602a1d..49e1bae1ec242 100644 --- a/api/API_VERSIONING.md +++ b/api/API_VERSIONING.md @@ -72,8 +72,10 @@ An exception to the above policy exists for: or message has not been included in an Envoy release. * API versions tagged `vNalpha`. Within an alpha major version, arbitrary breaking changes are allowed. * Any field, message or enum with a `[#not-implemented-hide:..` comment. -* Any proto with a `(udpa.annotations.file_status).work_in_progress` option annotation. -* Any proto marked as [#alpha:]. +* Any proto with a `(udpa.annotations.file_status).work_in_progress`, + `(xds.annotations.v3.file_status).work_in_progress`, + `(xds.annotations.v3.message_status).work_in_progress`, or + `(xds.annotations.v3.field_status).work_in_progress` option annotation. Note that changes to default values for wrapped types, e.g. `google.protobuf.UInt32Value` are not governed by the above policy.
Any management server requiring stability across Envoy API or diff --git a/api/BUILD b/api/BUILD index 93f9184a2b400..d8cffd4a48f78 100644 --- a/api/BUILD +++ b/api/BUILD @@ -64,10 +64,14 @@ proto_library( "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", + "//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg", + "//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/key_value/v3:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", @@ -93,15 +97,14 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", + "//envoy/extensions/access_loggers/open_telemetry/v3:pkg", "//envoy/extensions/access_loggers/stream/v3:pkg", "//envoy/extensions/access_loggers/wasm/v3:pkg", - "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg", + "//envoy/extensions/cache/simple_http_cache/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", @@ -113,14 +116,14 @@ proto_library( "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/common/matcher/action/v3:pkg", 
"//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", - "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", + "//envoy/extensions/filters/http/admission_control/v3:pkg", "//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", - "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg", + "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", + "//envoy/extensions/filters/http/cache/v3:pkg", + "//envoy/extensions/filters/http/cdn_loop/v3:pkg", "//envoy/extensions/filters/http/composite/v3:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", @@ -129,7 +132,7 @@ proto_library( "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", + "//envoy/extensions/filters/http/ext_proc/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", @@ -144,7 +147,7 @@ proto_library( "//envoy/extensions/filters/http/kill_request/v3:pkg", "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", + "//envoy/extensions/filters/http/oauth2/v3:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", "//envoy/extensions/filters/http/original_src/v3:pkg", "//envoy/extensions/filters/http/ratelimit/v3:pkg", @@ -172,14 +175,14 @@ proto_library( "//envoy/extensions/filters/network/rbac/v3:pkg", 
"//envoy/extensions/filters/network/redis_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", + "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", @@ -198,6 +201,7 @@ proto_library( "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", "//envoy/extensions/rate_limit_descriptors/expr/v3:pkg", + "//envoy/extensions/rbac/matchers/upstream_ip_port/v3:pkg", "//envoy/extensions/request_id/uuid/v3:pkg", "//envoy/extensions/resource_monitors/fixed_heap/v3:pkg", "//envoy/extensions/resource_monitors/injected_resource/v3:pkg", @@ -211,7 +215,7 @@ proto_library( "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg", + "//envoy/extensions/transport_sockets/s2a/v3:pkg", "//envoy/extensions/transport_sockets/starttls/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", @@ -221,14 +225,14 @@ proto_library( "//envoy/extensions/upstreams/http/v3:pkg", "//envoy/extensions/upstreams/tcp/generic/v3:pkg", "//envoy/extensions/wasm/v3:pkg", - 
"//envoy/extensions/watchdog/profile_action/v3alpha:pkg", + "//envoy/extensions/watchdog/profile_action/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/ext_proc/v3alpha:pkg", + "//envoy/service/ext_proc/v3:pkg", "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", @@ -246,7 +250,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", - "//envoy/watchdog/v3alpha:pkg", + "//envoy/watchdog/v3:pkg", ], ) diff --git a/api/STYLE.md b/api/STYLE.md index b185be97c9687..5689d1162a08e 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -34,10 +34,13 @@ In addition, the following conventions should be followed: implementation. These indicate that the entity is not implemented in Envoy and the entity should be hidden from the Envoy documentation. -* Use a `[#alpha:]` annotation in comments for messages that are considered alpha - and are not subject to the threat model. This is similar to the work-in-progress/alpha tagging - of extensions described below, but allows tagging messages that are used as part of the core API - as alpha without having to break it into its own file. +* Use a `(xds.annotations.v3.file_status).work_in_progress`, + `(xds.annotations.v3.message_status).work_in_progress`, or + `(xds.annotations.v3.field_status).work_in_progress` option annotation for files, + messages, or fields, respectively, that are considered work in progress and are not subject to the + threat model or the breaking change policy. This is similar to the work-in-progress/alpha tagging + of extensions described below, but allows tagging protos that are used as part of the core API + as work in progress without having to break them into their own file. 
* Always use plural field names for `repeated` fields, such as `filters`. @@ -144,9 +147,8 @@ To add an extension config to the API, the steps below should be followed: (`option (udpa.annotations.file_status).package_version_status = ACTIVE;`). This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD). 1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`. -1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file, - reformat `foobar.proto` as needed and also generate the shadow API protos. -1. `git add api/ generated_api_shadow/` to add any new files to your Git index. +1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file and + reformat `foobar.proto` as needed. ## API annotations diff --git a/api/bazel/BUILD b/api/bazel/BUILD index 0e5c8aea75b01..a8b7b161067fd 100644 --- a/api/bazel/BUILD +++ b/api/bazel/BUILD @@ -1,4 +1,6 @@ load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") +load("//:utils.bzl", "json_data") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") licenses(["notice"]) # Apache 2 @@ -15,3 +17,8 @@ go_proto_compiler( valid_archive = False, visibility = ["//visibility:public"], ) + +json_data( + name = "repository_locations", + data = REPOSITORY_LOCATIONS_SPEC, +) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index be1e9c9789e4b..0e1a19f1b8ddd 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -4,9 +4,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "bazel-skylib", project_desc = "Common useful functions and rules for Bazel", project_url = "https://github.com/bazelbuild/bazel-skylib", - version = "1.0.3", - sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", - release_date = "2020-08-27", + version = "1.1.1", + sha256 = 
"c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d", + release_date = "2021-09-27", urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], use_category = ["api"], ), @@ -32,9 +32,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Bazel build tools", project_desc = "Developer tools for working with Google's bazel buildtool.", project_url = "https://github.com/bazelbuild/buildtools", - version = "4.0.1", - sha256 = "c28eef4d30ba1a195c6837acf6c75a4034981f5b4002dda3c5aa6e48ce023cf1", - release_date = "2021-03-01", + version = "4.2.2", + sha256 = "ae34c344514e08c23e90da0e2d6cb700fcd28e80c02e23e4d5715dddcb42f7b3", + release_date = "2021-10-07", strip_prefix = "buildtools-{version}", urls = ["https://github.com/bazelbuild/buildtools/archive/{version}.tar.gz"], use_category = ["api"], @@ -44,9 +44,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_desc = "xDS API Working Group (xDS-WG)", project_url = "https://github.com/cncf/xds", # During the UDPA -> xDS migration, we aren't working with releases. 
- version = "dd25fe81a44506ab21ea666fb70b3b1c4bb183ee", - sha256 = "9184235cd31272679e4c7f9232c341d4ea75351ded74d3fbba28b05c290bfa71", - release_date = "2021-07-22", + version = "c0841ac0dd72f6d26903f7e68fa64bd038533ba5", + sha256 = "ddd12de0fab2356db6c353e2ae75a21d83712c869aeb0ec73b215ca3eba9ee77", + release_date = "2021-10-07", strip_prefix = "xds-{version}", urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"], use_category = ["api"], @@ -100,20 +100,20 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Protobuf Rules for Bazel", project_desc = "Protocol buffer rules for Bazel", project_url = "https://github.com/bazelbuild/rules_proto", - version = "f7a30f6f80006b591fa7c437fe5a951eb10bcbcf", - sha256 = "9fc210a34f0f9e7cc31598d109b5d069ef44911a82f507d5a88716db171615a8", - release_date = "2021-02-09", + version = "4.0.0", + sha256 = "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1", + release_date = "2021-09-15", strip_prefix = "rules_proto-{version}", - urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], + urls = ["https://github.com/bazelbuild/rules_proto/archive/refs/tags/{version}.tar.gz"], use_category = ["api"], ), opentelemetry_proto = dict( project_name = "OpenTelemetry Proto", project_desc = "Language Independent Interface Types For OpenTelemetry", project_url = "https://github.com/open-telemetry/opentelemetry-proto", - version = "0.9.0", - sha256 = "9ec38ab51eedbd7601979b0eda962cf37bc8a4dc35fcef604801e463f01dcc00", - release_date = "2021-05-12", + version = "0.11.0", + sha256 = "985367f8905e91018e636cbf0d83ab3f834b665c4f5899a27d10cae9657710e2", + release_date = "2021-10-07", strip_prefix = "opentelemetry-proto-{version}", urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"], use_category = ["api"], diff --git a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD index 
ee92fb652582e..ec1e778e06e5c 100644 --- a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD +++ b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], + deps = [ + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", + ], ) diff --git a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto index 03a6522852ab5..88fd46c3a8569 100644 --- a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto +++ b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto @@ -2,14 +2,16 @@ syntax = "proto3"; package envoy.extensions.filters.network.kafka_mesh.v3alpha; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_mesh.v3alpha"; option java_outer_classname = "KafkaMeshProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: Kafka Mesh] // Kafka Mesh :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/BUILD similarity index 100% rename from api/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD rename to api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/BUILD diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto b/api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.proto similarity index 50% rename from generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto rename to api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.proto index 5463ab6513bee..4b7accacf406f 100644 --- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto +++ b/api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.proto @@ -1,17 +1,16 @@ syntax = "proto3"; -package envoy.config.filter.thrift.router.v2alpha1; +package envoy.extensions.filters.network.sip_proxy.router.v3alpha; import "udpa/annotations/status.proto"; -option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1"; +option java_package = "io.envoyproxy.envoy.extensions.filters.network.sip_proxy.router.v3alpha"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] -// Thrift router :ref:`configuration overview `. 
-// [#extension: envoy.filters.thrift.router] +// [#extension: envoy.filters.sip.router] message Router { } diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD rename to api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/BUILD diff --git a/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto new file mode 100644 index 0000000000000..03c17a8ede82e --- /dev/null +++ b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.sip_proxy.v3alpha; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.sip_proxy.v3alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Sip Proxy Route Configuration] + +message RouteConfiguration { + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + oneof match_specifier { + option (validate.required) = true; + + // The domain from Request URI or Route Header. 
+ string domain = 1; + } +} + +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates a single upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_len: 1}]; + } +} diff --git a/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.proto b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.proto new file mode 100644 index 0000000000000..380ee714f40c2 --- /dev/null +++ b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.sip_proxy.v3alpha; + +import "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.sip_proxy.v3alpha"; +option java_outer_classname = "SipProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Sip Proxy] +// [#extension: envoy.filters.network.sip_proxy] + +message SipProxy { + message SipSettings { + // transaction timeout timer [Timer B] unit is milliseconds, default value 64*T1. 
+ // + // Session Initiation Protocol (SIP) timer summary + // + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer | Default value | Section | Meaning | + // +=========+=========================+==========+==============================================================================+ + // | T1 | 500 ms | 17.1.1.1 | Round-trip time (RTT) estimate | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | T2 | 4 sec | 17.1.2.2 | Maximum re-transmission interval for non-INVITE requests and INVITE responses| + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | T4 | 5 sec | 17.1.2.2 | Maximum duration that a message can remain in the network | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer A | initially T1 | 17.1.1.2 | INVITE request re-transmission interval, for UDP only | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer B | 64*T1 | 17.1.1.2 | INVITE transaction timeout timer | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer D | > 32 sec. for UDP | 17.1.1.2 | Wait time for response re-transmissions | + // | | 0 sec. 
for TCP and SCTP | | | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer E | initially T1 | 17.1.2.2 | Non-INVITE request re-transmission interval, UDP only | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer F | 64*T1 | 17.1.2.2 | Non-INVITE transaction timeout timer | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer G | initially T1 | 17.2.1 | INVITE response re-transmission interval | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer H | 64*T1 | 17.2.1 | Wait time for ACK receipt | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer I | T4 for UDP | 17.2.1 | Wait time for ACK re-transmissions | + // | | 0 sec. for TCP and SCTP | | | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer J | 64*T1 for UDP | 17.2.2 | Wait time for re-transmissions of non-INVITE requests | + // | | 0 sec. for TCP and SCTP | | | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + // | Timer K | T4 for UDP | 17.1.2.2 | Wait time for response re-transmissions | + // | | 0 sec. 
for TCP and SCTP | | | + // +---------+-------------------------+----------+------------------------------------------------------------------------------+ + google.protobuf.Duration transaction_timeout = 1; + + // own domain name + string own_domain = 2; + + // points to domain match with own_domain + string domain_match_parameter_name = 3; + } + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 2; + + // A list of individual Sip filters that make up the filter chain for requests made to the + // Sip proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no sip_filters are specified, a default Sip router filter + // (`envoy.filters.sip.router`) is used. + // [#extension-category: envoy.sip_proxy.filters] + repeated SipFilter sip_filters = 3; + + SipSettings settings = 4; +} + +// SipFilter configures a Sip filter. +message SipFilter { + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// SipProtocolOptions specifies Sip upstream protocol options. This object is used in +// :ref:`typed_extension_protocol_options`, +// keyed by the name `envoy.filters.network.sip_proxy`. +message SipProtocolOptions { + // All sip messages in one dialog should go to the same endpoint. + bool session_affinity = 1; + + // The Register with Authorization header should go to the same endpoint which send out the 401 Unauthorized. 
+ bool registration_affinity = 2; +} diff --git a/api/envoy/extensions/common/key_value/v3/BUILD b/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD similarity index 100% rename from api/envoy/extensions/common/key_value/v3/BUILD rename to api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD diff --git a/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto b/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto new file mode 100644 index 0000000000000..aa2d8cd2fb823 --- /dev/null +++ b/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.extensions.private_key_providers.cryptomb.v3alpha; + +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.private_key_providers.cryptomb.v3alpha"; +option java_outer_classname = "CryptombProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: CryptoMb private key provider] +// [#extension: envoy.tls.key_providers.cryptomb] + +// A CryptoMbPrivateKeyMethodConfig message specifies how the CryptoMb private +// key provider is configured. The private key provider provides `SIMD` +// processing for RSA sign and decrypt operations (ECDSA signing uses regular +// BoringSSL functions). The provider works by gathering the operations into a +// worker-thread specific queue, and processing the queue using `ipp-crypto` +// library when the queue is full or when a timer expires. +// [#extension-category: envoy.tls.key_providers] +message CryptoMbPrivateKeyMethodConfig { + // Private key to use in the private key provider. 
If set to inline_bytes or + // inline_string, the value needs to be the private key in PEM format. + config.core.v3.DataSource private_key = 1 [(udpa.annotations.sensitive) = true]; + + // How long to wait until the per-thread processing queue should be + // processed. If the processing queue gets full (eight sign or decrypt + // requests are received) it is processed immediately. However, if the + // queue is not filled before the delay has expired, the requests + // already in the queue are processed, even if the queue is not full. + // In effect, this value controls the balance between latency and + // throughput. The duration needs to be set to a non-zero value. + google.protobuf.Duration poll_delay = 2 [(validate.rules).duration = { + required: true + gt {} + }]; +} diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index d6213d6fe9488..bcedfa509818e 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -43,7 +43,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 56] +// [#next-free-field: 57] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -123,15 +123,23 @@ message Cluster { // only perform a lookup for addresses in the IPv6 family. If AUTO is // specified, the DNS resolver will first perform a lookup for addresses in // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // This is semantically equivalent to a non-existent V6_PREFERRED option. + // AUTO is a legacy name that is more opaque than + // necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. + // If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the + // IPv4 family and fallback to a lookup for addresses in the IPv6 family. 
i.e., the callback + // target will only get v6 addresses if there were NO v4 addresses to return. // For cluster types other than // :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS`, // this setting is // ignored. + // [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] enum DnsLookupFamily { AUTO = 0; V4_ONLY = 1; V6_ONLY = 2; + V4_PREFERRED = 3; } enum ClusterProtocolSelection { @@ -337,6 +345,35 @@ message Cluster { bool list_as_any = 7; } + // Configuration for :ref:`slow start mode `. + message SlowStartConfig { + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + google.protobuf.Duration slow_start_window = 1; + + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // `new_weight = weight * time_factor ^ (1 / aggression)`, + // where `time_factor=(time_since_start_seconds / slow_start_time_seconds)`. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + core.v3.RuntimeDouble aggression = 2; + } + + // Specific configuration for the RoundRobin load balancing policy. + message RoundRobinLbConfig { + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. 
+ SlowStartConfig slow_start_config = 1; + } + // Specific configuration for the LeastRequest load balancing policy. message LeastRequestLbConfig { option (udpa.annotations.versioning).previous_message_type = @@ -370,6 +407,10 @@ message Cluster { // .. note:: // This setting only takes effect if all host weights are not equal. core.v3.RuntimeDouble active_request_bias = 2; + + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. + SlowStartConfig slow_start_config = 3; } // Specific configuration for the :ref:`RingHash` @@ -951,6 +992,9 @@ message Cluster { // Optional configuration for the LeastRequest load balancing policy. LeastRequestLbConfig least_request_lb_config = 37; + + // Optional configuration for the RoundRobin load balancing policy. + RoundRobinLbConfig round_robin_lb_config = 56; } // Common configuration for all load balancer implementations. diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD b/api/envoy/config/common/key_value/v3/BUILD similarity index 84% rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD rename to api/envoy/config/common/key_value/v3/BUILD index 1c1a6f6b44235..e9b556d681cfd 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD +++ b/api/envoy/config/common/key_value/v3/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/config/common/key_value/v3/config.proto similarity index 60% rename from generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto rename to api/envoy/config/common/key_value/v3/config.proto index 66a55435437b3..8d62c09863083 100644 --- a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto +++ b/api/envoy/config/common/key_value/v3/config.proto @@ 
-1,22 +1,25 @@ syntax = "proto3"; -package envoy.extensions.common.key_value.v3; +package envoy.config.common.key_value.v3; import "envoy/config/core/v3/extension.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3"; +option java_package = "io.envoyproxy.envoy.config.common.key_value.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Key Value Store storage plugin] -// [#alpha:] // This shared configuration for Envoy key value stores. message KeyValueStoreConfig { + option (xds.annotations.v3.message_status).work_in_progress = true; + // [#extension-category: envoy.common.key_value] - config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; + core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/common/matcher/v3/BUILD b/api/envoy/config/common/matcher/v3/BUILD index 2f90ace882d93..221350b756d44 100644 --- a/api/envoy/config/common/matcher/v3/BUILD +++ b/api/envoy/config/common/matcher/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", ], ) diff --git a/api/envoy/config/common/matcher/v3/matcher.proto b/api/envoy/config/common/matcher/v3/matcher.proto index d7deb71d0b469..1fb8c83ec3ef7 100644 --- a/api/envoy/config/common/matcher/v3/matcher.proto +++ b/api/envoy/config/common/matcher/v3/matcher.proto @@ -6,6 +6,8 @@ import "envoy/config/core/v3/extension.proto"; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/string.proto"; +import "xds/annotations/v3/status.proto"; + import 
"udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -21,9 +23,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // is found the action specified by the most specific on_no_match will be evaluated. // As an on_no_match might result in another matching tree being evaluated, this process // might repeat several times until the final OnMatch (or no match) is decided. -// -// [#alpha:] message Matcher { + option (xds.annotations.v3.message_status).work_in_progress = true; + // What to do if a match is successful. message OnMatch { oneof on_match { diff --git a/api/envoy/config/core/v3/BUILD b/api/envoy/config/core/v3/BUILD index 72e10b6df8440..3fbb6b0e1f186 100644 --- a/api/envoy/config/core/v3/BUILD +++ b/api/envoy/config/core/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", "@com_github_cncf_udpa//xds/core/v3:pkg", ], ) diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index d6c507b8dec9a..efa8ec5186f46 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -296,6 +296,15 @@ message RuntimeFeatureFlag { string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } +// Query parameter name/value pair. +message QueryParameter { + // The key of the query parameter. Case sensitive. + string key = 1 [(validate.rules).string = {min_len: 1}]; + + // The value of the query parameter. + string value = 2; +} + // Header name/value pair. message HeaderValue { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue"; @@ -320,12 +329,33 @@ message HeaderValueOption { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValueOption"; + // Describes the supported actions types for header append action. 
+ enum HeaderAppendAction { + // This action will append the specified value to the existing values if the header + // already exists. If the header doesn't exist then this will add the header with + // specified key and value. + APPEND_IF_EXISTS_OR_ADD = 0; + + // This action will add the header if it doesn't already exist. If the header + // already exists then this will be a no-op. + ADD_IF_ABSENT = 1; + + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will add the header + // with specified key and value. + OVERWRITE_IF_EXISTS_OR_ADD = 2; + } + // Header name/value pair that this option applies to. HeaderValue header = 1 [(validate.rules).message = {required: true}]; // Should the value be appended? If true (default), the value is appended to // existing values. Otherwise it replaces any existing values. google.protobuf.BoolValue append = 2; + + // [#not-implemented-hide:] Describes the action taken to append/overwrite the given value for an existing header + // or to only add this header if it's absent. Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD`. + HeaderAppendAction append_action = 3 [(validate.rules).enum = {defined_only: true}]; } // Wrapper for a set of headers. diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto index 304297e7c011c..81a9b6c7f1535 100644 --- a/api/envoy/config/core/v3/health_check.proto +++ b/api/envoy/config/core/v3/health_check.proto @@ -73,7 +73,7 @@ message HealthCheck { } } - // [#next-free-field: 12] + // [#next-free-field: 13] message HttpHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.HttpHealthCheck"; @@ -118,6 +118,18 @@ message HealthCheck { // range are required. Only statuses in the range [100, 600) are allowed. 
repeated type.v3.Int64Range expected_statuses = 9; + // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + // will count towards the configured :ref:`unhealthy_threshold `, + // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + // :ref:`Int64Range `. The start and end of each range are required. + // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses ` + // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + // be considered a successful health check. By default all responses not in + // :ref:`expected_statuses ` will result in + // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + // non-200 response will result in the host being marked unhealthy. + repeated type.v3.Int64Range retriable_statuses = 12; + // Use specified application protocol for health checks. type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; @@ -243,8 +255,10 @@ message HealthCheck { uint32 interval_jitter_percent = 18; // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. + // unhealthy. Note that for *http* health checking if a host responds with a code not in + // :ref:`expected_statuses ` + // or :ref:`retriable_statuses `, + // this threshold is ignored and the host is considered immediately unhealthy. 
google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; // The number of healthy health checks required before a host is marked diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 8f2347eb55179..4535b16667388 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -8,6 +8,8 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "xds/annotations/v3/status.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -60,15 +62,26 @@ message UpstreamHttpProtocolOptions { "envoy.api.v2.core.UpstreamHttpProtocolOptions"; // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. + // upstream connections based on the downstream HTTP host/authority header or any other arbitrary + // header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. bool auto_sni = 1; // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. + // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // This field is intended to be set with `auto_sni` field. bool auto_san_validation = 2; + + // An optional alternative to the host/authority header to be used for setting the SNI value. + // It should be a valid downstream HTTP header, as seen by the + // :ref:`router filter `. + // If unset, host/authority header will be used for populating the SNI. 
If the specified header + // is not found or the value is empty, host/authority header will be used instead. + // This field is intended to be set with `auto_sni` and/or `auto_san_validation` fields. + // If none of these fields are set then setting this would be a no-op. + string override_auto_sni_header = 3 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; } // Configures the alternate protocols cache which tracks alternate protocols that can be used to @@ -91,6 +104,12 @@ message AlternateProtocolsCacheOptions { // it is possible for the maximum entries in the cache to go slightly above the configured // value depending on timing. This is similar to how other circuit breakers work. google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; + + // Allows configuring a persistent + // :ref:`key value store ` to flush + // alternate protocols entries to disk. + // This function is currently only supported if concurrency is 1 + TypedExtensionConfig key_value_store_config = 3; } // [#next-free-field: 7] @@ -138,10 +157,11 @@ message HttpProtocolOptions { // The maximum duration of a connection. The duration is defined as a period since a connection // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout + // and if there are no active streams, the connection will be closed. If there are any active streams, + // the drain sequence will kick-in, and the connection will be force-closed after the drain period. + // See :ref:`drain_timeout // `. - // Note: not implemented for upstream connections. + // Note: This feature is not yet implemented for the upstream connections. google.protobuf.Duration max_connection_duration = 3; // The maximum number of headers. 
If unconfigured, the default @@ -473,6 +493,7 @@ message GrpcProtocolOptions { } // A message which allows using HTTP/3. +// [#next-free-field: 6] message Http3ProtocolOptions { QuicProtocolOptions quic_protocol_options = 1; @@ -483,6 +504,14 @@ message Http3ProtocolOptions { // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging // `. google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; + + // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + // the header mechanisms from the `HTTP/2 extended connect RFC + // `_ + // and settings `proposed for HTTP/3 + // `_ + // Note that HTTP/3 CONNECT is not yet an RFC. + bool allow_extended_connect = 5 [(xds.annotations.v3.field_status).work_in_progress = true]; } // A message to control transformations to the :scheme header diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index 07044f92201e9..847e36f163ba2 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -390,7 +390,8 @@ message FilterStateRule { // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. - map requires = 3; + map + requires = 3; } // This is the Envoy HTTP filter config for JWT authentication. 
diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto index d66f9be2b4981..474f30a285633 100644 --- a/api/envoy/config/rbac/v3/rbac.proto +++ b/api/envoy/config/rbac/v3/rbac.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.rbac.v3; import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; @@ -146,7 +147,7 @@ message Policy { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 12] +// [#next-free-field: 13] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; @@ -218,6 +219,10 @@ message Permission { // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. type.matcher.v3.StringMatcher requested_server_name = 9; + + // Extension for configuring custom matchers for RBAC. + // [#extension-category: envoy.rbac.matchers] + core.v3.TypedExtensionConfig matcher = 12; } } diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index e6be0c43ed0ac..d25edd756db5f 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -529,6 +529,14 @@ message RouteMatch { // against all the specified query parameters. If the number of specified // query parameters is nonzero, they all must match the *path* header's // query string for a match to occur. + // + // .. note:: + // + // If query parameters are used to pass request message fields when + // `grpc_json_transcoder `_ + // is used, the transcoded message fields maybe different. The query parameters are + // url encoded, but the message fields are not. For example, if a query + // parameter is "foo%20bar", the message field will be "foo bar". 
repeated QueryParameterMatcher query_parameters = 7; // If specified, only gRPC requests will be matched. The router will check @@ -1164,7 +1172,7 @@ message RouteAction { } // HTTP retry :ref:`architecture overview `. -// [#next-free-field: 12] +// [#next-free-field: 14] message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy"; @@ -1305,8 +1313,8 @@ message RetryPolicy { google.protobuf.UInt32Value num_retries = 2 [(udpa.annotations.field_migrate).rename = "max_retries"]; - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for + // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + // parameter is optional. The same conditions documented for // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. // // .. note:: @@ -1318,6 +1326,27 @@ message RetryPolicy { // would have been exhausted. google.protobuf.Duration per_try_timeout = 3; + // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + // parameter is optional and if absent there is no per try idle timeout. The semantics of the per + // try idle timeout are similar to the + // :ref:`route idle timeout ` and + // :ref:`stream idle timeout + // ` + // both enforced by the HTTP connection manager. The difference is that this idle timeout + // is enforced by the router for each individual attempt and thus after all previous filters have + // run, as opposed to *before* all previous filters run for the other idle timeouts. This timeout + // is useful in cases in which total request timeout is bounded by a number of retries and a + // :ref:`per_try_timeout `, but + // there is a desire to ensure each try is making incremental progress. 
Note also that similar + // to :ref:`per_try_timeout `, + // this idle timeout does not start until after both the entire request has been received by the + // router *and* a connection pool connection has been obtained. Unlike + // :ref:`per_try_timeout `, + // the idle timer continues once the response starts streaming back to the downstream client. + // This ensures that response data continues to make progress without using one of the HTTP + // connection manager idle timeouts. + google.protobuf.Duration per_try_idle_timeout = 13; + // Specifies an implementation of a RetryPriority which is used to determine the // distribution of load across priorities used for retries. Refer to // :ref:`retry plugin configuration ` for more details. @@ -1329,6 +1358,11 @@ message RetryPolicy { // details. repeated RetryHostPredicate retry_host_predicate = 5; + // Retry options predicates that will be applied prior to retrying a request. These predicates + // allow customizing request behavior between retries. + // [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + repeated core.v3.TypedExtensionConfig retry_options_predicates = 12; + // The maximum number of times host selection will be reattempted before giving up, at which // point the host that was last selected will be routed to. If unspecified, this will default to // retrying once. 
diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD b/api/envoy/extensions/access_loggers/open_telemetry/v3/BUILD similarity index 100% rename from api/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD rename to api/envoy/extensions/access_loggers/open_telemetry/v3/BUILD diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto b/api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto similarity index 93% rename from api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto rename to api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto index 1b7027133e153..cd4a63181290f 100644 --- a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto +++ b/api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.access_loggers.open_telemetry.v3alpha; +package envoy.extensions.access_loggers.open_telemetry.v3; import "envoy/extensions/access_loggers/grpc/v3/als.proto"; @@ -9,10 +9,9 @@ import "opentelemetry/proto/common/v1/common.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3"; option java_outer_classname = "LogsServiceProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OpenTelemetry (gRPC) Access Log] diff --git a/api/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD b/api/envoy/extensions/cache/simple_http_cache/v3/BUILD similarity index 100% rename from api/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD rename to api/envoy/extensions/cache/simple_http_cache/v3/BUILD diff --git 
a/api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto b/api/envoy/extensions/cache/simple_http_cache/v3/config.proto similarity index 83% rename from api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto rename to api/envoy/extensions/cache/simple_http_cache/v3/config.proto index 1b42e9b3f93d4..e7bd7cdbdf91a 100644 --- a/api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto +++ b/api/envoy/extensions/cache/simple_http_cache/v3/config.proto @@ -1,10 +1,10 @@ syntax = "proto3"; -package envoy.extensions.cache.simple_http_cache.v3alpha; +package envoy.extensions.cache.simple_http_cache.v3; import "udpa/annotations/status.proto"; -option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD index 6e07b4a9226bb..b9cc22c7ee67c 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD @@ -8,8 +8,8 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/key_value/v3:pkg", "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 4a0d87ff6c3b8..e3904ae287192 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -3,10 +3,10 @@ syntax = "proto3"; package 
envoy.extensions.common.dynamic_forward_proxy.v3; import "envoy/config/cluster/v3/cluster.proto"; +import "envoy/config/common/key_value/v3/config.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/resolver.proto"; -import "envoy/extensions/common/key_value/v3/config.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -142,5 +142,5 @@ message DnsCacheConfig { // [#not-implemented-hide:] // Configuration to flush the DNS cache to long term storage. - key_value.v3.KeyValueStoreConfig key_value_config = 13; + config.common.key_value.v3.KeyValueStoreConfig key_value_config = 13; } diff --git a/api/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/extensions/common/key_value/v3/config.proto deleted file mode 100644 index 66a55435437b3..0000000000000 --- a/api/envoy/extensions/common/key_value/v3/config.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.key_value.v3; - -import "envoy/config/core/v3/extension.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Key Value Store storage plugin] - -// [#alpha:] -// This shared configuration for Envoy key value stores. 
-message KeyValueStoreConfig { - // [#extension-category: envoy.common.key_value] - config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/common/matching/v3/BUILD b/api/envoy/extensions/common/matching/v3/BUILD index 1afd4545d9608..de9e120297ac4 100644 --- a/api/envoy/extensions/common/matching/v3/BUILD +++ b/api/envoy/extensions/common/matching/v3/BUILD @@ -10,6 +10,7 @@ api_proto_package( "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", "@com_github_cncf_udpa//xds/type/matcher/v3:pkg", ], ) diff --git a/api/envoy/extensions/common/matching/v3/extension_matcher.proto b/api/envoy/extensions/common/matching/v3/extension_matcher.proto index eee82a381633b..10bd3b7389a69 100644 --- a/api/envoy/extensions/common/matching/v3/extension_matcher.proto +++ b/api/envoy/extensions/common/matching/v3/extension_matcher.proto @@ -5,6 +5,7 @@ package envoy.extensions.common.matching.v3; import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/extension.proto"; +import "xds/annotations/v3/status.proto"; import "xds/type/matcher/v3/matcher.proto"; import "envoy/annotations/deprecation.proto"; @@ -21,9 +22,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Wrapper around an existing extension that provides an associated matcher. This allows // decorating an existing extension with a matcher, which can be used to match against // relevant protocol data. -// -// [#alpha:] message ExtensionWithMatcher { + option (xds.annotations.v3.message_status).work_in_progress = true; + // The associated matcher. This is deprecated in favor of xds_matcher. 
config.common.matcher.v3.Matcher matcher = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/api/envoy/extensions/filters/http/admission_control/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD rename to api/envoy/extensions/filters/http/admission_control/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto similarity index 96% rename from generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto rename to api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto index 9bb3603f9ebd6..702f03019b1c4 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto +++ b/api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.admission_control.v3alpha; +package envoy.extensions.filters.http.admission_control.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/range.proto"; @@ -10,10 +10,9 @@ import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3"; option java_outer_classname = "AdmissionControlProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Admission Control] diff --git 
a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto deleted file mode 100644 index 9bb3603f9ebd6..0000000000000 --- a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto +++ /dev/null @@ -1,103 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.admission_control.v3alpha; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; -option java_outer_classname = "AdmissionControlProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Admission Control] -// [#extension: envoy.filters.http.admission_control] - -// [#next-free-field: 8] -message AdmissionControl { - // Default method of specifying what constitutes a successful request. All status codes that - // indicate a successful request must be explicitly specified if not relying on the default - // values. - message SuccessCriteria { - message HttpCriteria { - // Status code ranges that constitute a successful request. Configurable codes are in the - // range [100, 600). - repeated type.v3.Int32Range http_success_status = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - message GrpcCriteria { - // Status codes that constitute a successful request. - // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful - // responses. - // - // .. 
note:: - // - // The default HTTP codes considered successful by the admission controller are done so due - // to the unlikelihood that sending fewer requests would change their behavior (for example: - // redirects, unauthorized access, or bad requests won't be alleviated by sending less - // traffic). - HttpCriteria http_criteria = 1; - - // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, - // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, - // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. - // - // .. note:: - // - // The default gRPC codes that are considered successful by the admission controller are - // chosen because of the unlikelihood that sending fewer requests will change the behavior. - GrpcCriteria grpc_criteria = 2; - } - - // If set to false, the admission control filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 1; - - // Defines how a request is considered a success/failure. - oneof evaluation_criteria { - option (validate.required) = true; - - SuccessCriteria success_criteria = 2; - } - - // The sliding time window over which the success rate is calculated. The window is rounded to the - // nearest second. Defaults to 30s. - google.protobuf.Duration sampling_window = 3; - - // Rejection probability is defined by the formula:: - // - // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression) - // - // The aggression dictates how heavily the admission controller will throttle requests upon SR - // dropping at or below the threshold. A value of 1 will result in a linear increase in - // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the - // message is unspecified, the aggression is 1.0. See `the admission control documentation - // `_ - // for a diagram illustrating this. 
- config.core.v3.RuntimeDouble aggression = 4; - - // Dictates the success rate at which the rejection probability is non-zero. As success rate drops - // below this threshold, rejection probability will increase. Any success rate above the threshold - // results in a rejection probability of 0. Defaults to 95%. - config.core.v3.RuntimePercent sr_threshold = 5; - - // If the average RPS of the sampling window is below this threshold, the request - // will not be rejected, even if the success rate is lower than sr_threshold. - // Defaults to 0. - config.core.v3.RuntimeUInt32 rps_threshold = 6; - - // The probability of rejection will never exceed this value, even if the failure rate is rising. - // Defaults to 80%. - config.core.v3.RuntimePercent max_rejection_probability = 7; -} diff --git a/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto b/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto index e628a6ca73fbb..0f0609b6e55ed 100644 --- a/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto +++ b/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto @@ -15,10 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for the alternate protocols cache HTTP filter. // [#extension: envoy.filters.http.alternate_protocols_cache] -// TODO(RyanTheOptimist): Move content from source/docs/http3_upstream.md to -// docs/root/intro/arch_overview/upstream/connection_pooling.rst when unhiding the proto. message FilterConfig { - // [#not-implemented-hide:] // If set, causes the use of the alternate protocols cache, which is responsible for // parsing and caching HTTP Alt-Svc headers. This enables the use of HTTP/3 for upstream // servers that advertise supporting it. 
diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD b/api/envoy/extensions/filters/http/bandwidth_limit/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD rename to api/envoy/extensions/filters/http/bandwidth_limit/v3/BUILD diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto similarity index 93% rename from api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto rename to api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto index 4cd5f8268b704..c512d541aaefc 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.bandwidth_limit.v3alpha; +package envoy.extensions.filters.http.bandwidth_limit.v3; import "envoy/config/core/v3/base.proto"; @@ -10,10 +10,9 @@ import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3"; option java_outer_classname = "BandwidthLimitProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Bandwidth limit] diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/BUILD b/api/envoy/extensions/filters/http/cache/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/cache/v3alpha/BUILD rename to api/envoy/extensions/filters/http/cache/v3/BUILD diff --git 
a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3/cache.proto similarity index 96% rename from generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto rename to api/envoy/extensions/filters/http/cache/v3/cache.proto index 5f0a5befa4bb3..71f4a5bb73f93 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v3/cache.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.cache.v3alpha; +package envoy.extensions.filters.http.cache.v3; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -11,10 +11,9 @@ import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3"; option java_outer_classname = "CacheProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Cache Filter] diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto deleted file mode 100644 index 5f0a5befa4bb3..0000000000000 --- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cache.v3alpha; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha"; -option java_outer_classname = "CacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Cache Filter] - -// [#extension: envoy.filters.http.cache] -message CacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.cache.v2alpha.CacheConfig"; - - // [#not-implemented-hide:] - // Modifies cache key creation by restricting which parts of the URL are included. - message KeyCreatorParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.cache.v2alpha.CacheConfig.KeyCreatorParams"; - - // If true, exclude the URL scheme from the cache key. Set to true if your origins always - // produce the same response for http and https requests. - bool exclude_scheme = 1; - - // If true, exclude the host from the cache key. Set to true if your origins' responses don't - // ever depend on host. - bool exclude_host = 2; - - // If *query_parameters_included* is nonempty, only query parameters matched - // by one or more of its matchers are included in the cache key. Any other - // query params will not affect cache lookup. - repeated config.route.v3.QueryParameterMatcher query_parameters_included = 3; - - // If *query_parameters_excluded* is nonempty, query parameters matched by one - // or more of its matchers are excluded from the cache key (even if also - // matched by *query_parameters_included*), and will not affect cache lookup. - repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4; - } - - // Config specific to the cache storage implementation. - // [#extension-category: envoy.filters.http.cache] - google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - - // List of matching rules that defines allowed *Vary* headers. 
- // - // The *vary* response header holds a list of header names that affect the - // contents of a response, as described by - // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. - // - // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't matched by any rules in - // *allowed_vary_headers*, that response will not be cached. - // - // During lookup, *allowed_vary_headers* controls what request headers will be - // sent to the cache storage implementation. - repeated type.matcher.v3.StringMatcher allowed_vary_headers = 2; - - // [#not-implemented-hide:] - // - // - // Modifies cache key creation by restricting which parts of the URL are included. - KeyCreatorParams key_creator_params = 3; - - // [#not-implemented-hide:] - // - // - // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache - // storage implementation may have its own limit beyond which it will reject insertions). 
- uint32 max_body_bytes = 4; -} diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD b/api/envoy/extensions/filters/http/cdn_loop/v3/BUILD similarity index 100% rename from api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD rename to api/envoy/extensions/filters/http/cdn_loop/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto similarity index 89% rename from generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto rename to api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto index 5f201026c66b3..77a19511c3d45 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto +++ b/api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto @@ -1,14 +1,13 @@ syntax = "proto3"; -package envoy.extensions.filters.http.cdn_loop.v3alpha; +package envoy.extensions.filters.http.cdn_loop.v3; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3"; option java_outer_classname = "CdnLoopProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP CDN-Loop Filter] diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto deleted file mode 100644 index 5f201026c66b3..0000000000000 --- a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cdn_loop.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha"; -option java_outer_classname = "CdnLoopProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP CDN-Loop Filter] -// [#extension: envoy.filters.http.cdn_loop] - -// CDN-Loop Header filter config. See the :ref:`configuration overview -// ` for more information. -message CdnLoopConfig { - // The CDN identifier to use for loop checks and to append to the - // CDN-Loop header. - // - // RFC 8586 calls this the cdn-id. The cdn-id can either be a - // pseudonym or hostname the CDN is in control of. - // - // cdn_id must not be empty. - string cdn_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The maximum allowed count of cdn_id in the downstream CDN-Loop - // request header. - // - // The default of 0 means a request can transit the CdnLoopFilter - // once. A value of 1 means that a request can transit the - // CdnLoopFilter twice and so on. 
- uint32 max_allowed_occurrences = 2; -} diff --git a/api/envoy/extensions/filters/http/composite/v3/BUILD b/api/envoy/extensions/filters/http/composite/v3/BUILD index 1c1a6f6b44235..e9b556d681cfd 100644 --- a/api/envoy/extensions/filters/http/composite/v3/BUILD +++ b/api/envoy/extensions/filters/http/composite/v3/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/composite/v3/composite.proto b/api/envoy/extensions/filters/http/composite/v3/composite.proto index f8a3bd83af567..a53364e8adfaf 100644 --- a/api/envoy/extensions/filters/http/composite/v3/composite.proto +++ b/api/envoy/extensions/filters/http/composite/v3/composite.proto @@ -4,6 +4,8 @@ package envoy.extensions.filters.http.composite.v3; import "envoy/config/core/v3/extension.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.composite.v3"; @@ -25,9 +27,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // where a match tree is specified that indicates (via // :ref:`ExecuteFilterAction `) // which filter configuration to create and delegate to. -// -// [#alpha:] message Composite { + option (xds.annotations.v3.message_status).work_in_progress = true; } // Composite match action (see :ref:`matching docs ` for more info on match actions). diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto index a5d7223b98d28..ecf2d271f952c 100644 --- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto @@ -27,6 +27,12 @@ message FilterConfig { // `. 
common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; + + // When this flag is set, the filter will add the resolved upstream address in the filter + // state. The state should be saved with key + // `envoy.stream.upstream_address` (See + // :repo:`upstream_address.h`). + bool save_upstream_address = 2; } // Per route Configuration for the dynamic forward proxy HTTP filter. diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 62feb51b191d5..b05420fa93cf4 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -244,6 +244,7 @@ message AuthorizationRequest { repeated config.core.v3.HeaderValue headers_to_add = 2; } +// [#next-free-field: 6] message AuthorizationResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.AuthorizationResponse"; @@ -270,6 +271,15 @@ message AuthorizationResponse { // the authorization response itself is successful, i.e. not failed or denied. When this list is // *not* set, no additional headers will be added to the client's response on success. type.matcher.v3.ListStringMatcher allowed_client_headers_on_success = 4; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be emitted as dynamic metadata to be consumed + // by the next filter. This metadata lives in a namespace specified by the canonical name of extension filter + // that requires it: + // + // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. + // - :ref:`envoy.filters.network.ext_authz ` for network filter. + type.matcher.v3.ListStringMatcher dynamic_metadata_from_headers = 5; } // Extra settings on a per virtualhost/route/weighted-cluster level. 
diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/BUILD b/api/envoy/extensions/filters/http/ext_proc/v3/BUILD similarity index 84% rename from generated_api_shadow/envoy/config/ratelimit/v3/BUILD rename to api/envoy/extensions/filters/http/ext_proc/v3/BUILD index 1c1a6f6b44235..e9b556d681cfd 100644 --- a/generated_api_shadow/envoy/config/ratelimit/v3/BUILD +++ b/api/envoy/extensions/filters/http/ext_proc/v3/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto similarity index 96% rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto rename to api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto index 37560feba3c27..e688657830a07 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto @@ -1,20 +1,22 @@ syntax = "proto3"; -package envoy.extensions.filters.http.ext_proc.v3alpha; +package envoy.extensions.filters.http.ext_proc.v3; import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; +import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto"; import "google/protobuf/duration.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3"; option java_outer_classname = "ExtProcProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; 
+option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: External Processing Filter] // External Processing Filter @@ -88,9 +90,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // The protocol itself is based on a bidirectional gRPC stream. Envoy will send the // server -// :ref:`ProcessingRequest ` +// :ref:`ProcessingRequest ` // messages, and the server must reply with -// :ref:`ProcessingResponse `. +// :ref:`ProcessingResponse `. // [#next-free-field: 9] message ExternalProcessor { diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto b/api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto similarity index 93% rename from generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto rename to api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto index d085790d34ab1..c15a5569a12c6 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto @@ -1,15 +1,17 @@ syntax = "proto3"; -package envoy.extensions.filters.http.ext_proc.v3alpha; +package envoy.extensions.filters.http.ext_proc.v3; + +import "xds/annotations/v3/status.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3"; option java_outer_classname = "ProcessingModeProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: External Processing Filter] // External Processing Filter Processing Mode diff --git 
a/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto b/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto deleted file mode 100644 index d085790d34ab1..0000000000000 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_proc.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; -option java_outer_classname = "ProcessingModeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Processing Filter] -// External Processing Filter Processing Mode -// [#extension: envoy.filters.http.ext_proc] - -// This configuration describes which parts of an HTTP request and -// response are sent to a remote server and how they are delivered. - -// [#next-free-field: 7] -message ProcessingMode { - // Control how headers and trailers are handled - enum HeaderSendMode { - // The default HeaderSendMode depends on which part of the message is being - // processed. By default, request and response headers are sent, - // while trailers are skipped. - DEFAULT = 0; - - // Send the header or trailer. - SEND = 1; - - // Do not send the header or trailer. - SKIP = 2; - } - - // Control how the request and response bodies are handled - enum BodySendMode { - // Do not send the body at all. This is the default. - NONE = 0; - - // Stream the body to the server in pieces as they arrive at the - // proxy. - STREAMED = 1; - - // Buffer the message body in memory and send the entire body at once. - // If the body exceeds the configured buffer limit, then the - // downstream system will receive an error. 
- BUFFERED = 2; - - // Buffer the message body in memory and send the entire body in one - // chunk. If the body exceeds the configured buffer limit, then the body contents - // up to the buffer limit will be sent. - BUFFERED_PARTIAL = 3; - } - - // How to handle the request header. Default is "SEND". - HeaderSendMode request_header_mode = 1 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the response header. Default is "SEND". - HeaderSendMode response_header_mode = 2 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the request body. Default is "NONE". - BodySendMode request_body_mode = 3 [(validate.rules).enum = {defined_only: true}]; - - // How do handle the response body. Default is "NONE". - BodySendMode response_body_mode = 4 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the request trailers. Default is "SKIP". - HeaderSendMode request_trailer_mode = 5 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the response trailers. Default is "SKIP". - HeaderSendMode response_trailer_mode = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto index a4feeff31f158..7311abe8df6f8 100644 --- a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto @@ -15,7 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC-JSON transcoder :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_json_transcoder] -// [#next-free-field: 12] +// [#next-free-field: 13] // GrpcJsonTranscoder filter configuration. // The filter itself can be used per route / per virtual host or on the general level. The most // specific one is being used for a given route. 
If the list of services is empty - filter @@ -211,12 +211,16 @@ message GrpcJsonTranscoder { bool convert_grpc_status = 9; // URL unescaping policy. - // This spec is only applied when extracting variable with multiple segments. + // This spec is only applied when extracting variable with multiple segments in the URL path. // For example, in case of `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` and `z` are multiple segments. // For a path with `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, `z=third/fourth`. // If this setting is not specified, the value defaults to :ref:`ALL_CHARACTERS_EXCEPT_RESERVED`. UrlUnescapeSpec url_unescape_spec = 10 [(validate.rules).enum = {defined_only: true}]; + // If true, unescape '+' to space when extracting variables in query parameters. + // This is to support `HTML 2.0 `_ + bool query_param_unescape_plus = 12; + // Configure the behavior when handling requests that cannot be transcoded. // // By default, the transcoder will silently pass through HTTP requests that are malformed. diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 9718dbe0550ab..6d15956e1479e 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 14] +// [#next-free-field: 15] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -137,6 +137,7 @@ message JwtProvider { // If false, the JWT is removed in the request after a success verification. If true, the JWT is // not removed in the request. Default value is false. 
+ // caveat: only works for from_header & has no effect for JWTs extracted through from_params & from_cookies. bool forward = 5; // Two fields below define where to extract the JWT from an HTTP request. @@ -230,6 +231,46 @@ message JwtProvider { // string payload_in_metadata = 9; + // If not empty, similar to :ref:`payload_in_metadata `, + // a successfully verified JWT header will be written to :ref:`Dynamic State ` + // as an entry (``protobuf::Struct``) in **envoy.filters.http.jwt_authn** *namespace* with the + // value of this field as the key. + // + // For example, if ``header_in_metadata`` is *my_header*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_header: + // alg: JWT + // kid: EF71iSaosbC5C4tC6Syq1Gm647M + // alg: PS256 + // + // When the metadata has **envoy.filters.http.jwt_authn** entry already (for example if + // :ref:`payload_in_metadata ` + // is not empty), it will be inserted as a new entry in the same *namespace* as shown below: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // my_header: + // alg: JWT + // kid: EF71iSaosbC5C4tC6Syq1Gm647M + // alg: PS256 + // + // .. warning:: + // Using the same key name for :ref:`header_in_metadata ` + // and :ref:`payload_in_metadata ` + // is not suggested due to potential override of existing entry, while it is not enforced during + // config validation. + // + string header_in_metadata = 14; + // Specify the clock skew in seconds when verifying JWT time constraint, // such as `exp`, and `nbf`. If not specified, default is 60 seconds. uint32 clock_skew_seconds = 10; @@ -541,7 +582,8 @@ message FilterStateRule { // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. 
- map requires = 3; + map + requires = 3; } // This is the Envoy HTTP filter config for JWT authentication. diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD b/api/envoy/extensions/filters/http/oauth2/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD rename to api/envoy/extensions/filters/http/oauth2/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3/oauth.proto similarity index 96% rename from generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto rename to api/envoy/extensions/filters/http/oauth2/v3/oauth.proto index e5f990512ca87..e88455454715d 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto +++ b/api/envoy/extensions/filters/http/oauth2/v3/oauth.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.oauth2.v3alpha; +package envoy.extensions.filters.http.oauth2.v3; import "envoy/config/core/v3/http_uri.proto"; import "envoy/config/route/v3/route_components.proto"; @@ -10,10 +10,9 @@ import "envoy/type/matcher/v3/path.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3"; option java_outer_classname = "OauthProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OAuth] diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto deleted file mode 100644 index e5f990512ca87..0000000000000 --- a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - 
-package envoy.extensions.filters.http.oauth2.v3alpha; - -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; -import "envoy/type/matcher/v3/path.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha"; -option java_outer_classname = "OauthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OAuth] -// OAuth :ref:`configuration overview `. -// [#extension: envoy.filters.http.oauth2] -// - -message OAuth2Credentials { - // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. - string client_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. - transport_sockets.tls.v3.SdsSecretConfig token_secret = 2 - [(validate.rules).message = {required: true}]; - - // Configures how the secret token should be created. - oneof token_formation { - option (validate.required) = true; - - // If present, the secret token will be a HMAC using the provided secret. - transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3 - [(validate.rules).message = {required: true}]; - } -} - -// OAuth config -// -// [#next-free-field: 11] -message OAuth2Config { - // Endpoint on the authorization server to retrieve the access token from. - config.core.v3.HttpUri token_endpoint = 1; - - // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Credentials used for OAuth. 
- OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; - - // The redirect URI passed to the authorization endpoint. Supports header formatting - // tokens. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers `. - // - // This URI should not contain any query parameters. - string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; - - // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. - type.matcher.v3.PathMatcher redirect_path_matcher = 5 - [(validate.rules).message = {required: true}]; - - // The path to sign a user out, clearing their credential cookies. - type.matcher.v3.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}]; - - // Forward the OAuth token as a Bearer to upstream web service. - bool forward_bearer_token = 7; - - // Any request that matches any of the provided matchers will be passed through without OAuth validation. - repeated config.route.v3.HeaderMatcher pass_through_matcher = 8; - - // Optional list of OAuth scopes to be claimed in the authorization request. If not specified, - // defaults to "user" scope. - // OAuth RFC https://tools.ietf.org/html/rfc6749#section-3.3 - repeated string auth_scopes = 9; - - // Optional resource parameter for authorization request - // RFC: https://tools.ietf.org/html/rfc8707 - repeated string resources = 10; -} - -// Filter config. -message OAuth2 { - // Leave this empty to disable OAuth2 for a specific route, using per filter config. 
- OAuth2Config config = 1; -} diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/BUILD b/api/envoy/extensions/filters/http/ratelimit/v3/BUILD index 0bad14913d217..5b2bddfabb819 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/BUILD +++ b/api/envoy/extensions/filters/http/ratelimit/v3/BUILD @@ -6,7 +6,10 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/ratelimit/v3:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/metadata/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index bc58e7f9b2e1a..53fb849361c1d 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -2,7 +2,10 @@ syntax = "proto3"; package envoy.extensions.filters.http.ratelimit.v3; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/ratelimit/v3/rls.proto"; +import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "google/protobuf/duration.proto"; @@ -105,6 +108,214 @@ message RateLimit { bool disable_x_envoy_ratelimited_header = 9; } +// Global rate limiting :ref:`architecture overview `. +// Also applies to Local rate limiting :ref:`using descriptors `. +// [#not-implemented-hide:] +message RateLimitConfig { + // [#next-free-field: 10] + message Action { + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. 
code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + } + + // The following descriptor entry is appended when a header contains a key that matches the + // *header_name*: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // An optional key to use in the descriptor entry. 
If not set it defaults + // to 'generic_key' as the descriptor key. + string descriptor_key = 2; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + bool expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated config.route.v3.HeaderMatcher headers = 3 + [(validate.rules).repeated = {min_items: 1}]; + } + + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. 
+ string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + + // Rate limit on metadata. + MetaData metadata = 8; + + // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + // [#extension-category: envoy.rate_limit_descriptors] + config.core.v3.TypedExtensionConfig extension = 9; + } + } + + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } + + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + uint32 stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. 
+ string disable_key = 2; + + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; +} + message RateLimitPerRoute { enum VhRateLimitsOptions { // Use the virtual host rate limits unless the route has a rate limit policy. @@ -117,6 +328,32 @@ message RateLimitPerRoute { IGNORE = 2; } + // The override option determines how the filter handles the cases where there is an override config at a more specific level than this one (from least to most specific: virtual host, route, cluster weight). + // [#not-implemented-hide:] + enum OverrideOptions { + // Client-defined default, typically OVERRIDE_POLICY. If VhRateLimitsOptions is set, that will be used instead. + DEFAULT = 0; + + // If there is an override config at a more specific level, use that instead of this one. + OVERRIDE_POLICY = 1; + + // If there is an override config at a more specific level, use data from both. + INCLUDE_POLICY = 2; + + // If there is an override config at a more specific level, ignore it and use only this one. + IGNORE_POLICY = 3; + } + // Specifies if the rate limit filter should include the virtual host rate limits. 
VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies if the rate limit filter should include the lower levels (route level, virtual host level or cluster weight level) rate limits override options. + // [#not-implemented-hide:] + OverrideOptions override_option = 2 [(validate.rules).enum = {defined_only: true}]; + + // Rate limit configuration. If not set, uses the + // :ref:`VirtualHost.rate_limits` or + // :ref:`RouteAction.rate_limits` fields instead. + // [#not-implemented-hide:] + repeated RateLimitConfig rate_limits = 3; } diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD rename to api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto similarity index 92% rename from generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto rename to api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto index 7f7eb57d5be64..a084b0682b672 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto +++ b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto @@ -1,16 +1,15 @@ syntax = "proto3"; -package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha; +package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3; import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; import 
"udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3"; option java_outer_classname = "SniDynamicForwardProxyProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SNI dynamic forward proxy] diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto deleted file mode 100644 index 7f7eb57d5be64..0000000000000 --- a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha"; -option java_outer_classname = "SniDynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SNI dynamic forward proxy] - -// Configuration for the SNI-based dynamic forward proxy filter. See the -// :ref:`architecture overview ` for -// more information. Note this filter must be configured along with -// :ref:`TLS inspector listener filter ` -// to work. 
-// [#extension: envoy.filters.network.sni_dynamic_forward_proxy] -message FilterConfig { - // The DNS cache configuration that the filter will attach to. Note this - // configuration must match that of associated :ref:`dynamic forward proxy - // cluster configuration - // `. - common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - oneof port_specifier { - // The port number to connect to the upstream. - uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}]; - } -} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto index 4916330ec5f3a..01c41c77bb2b5 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto @@ -85,8 +85,8 @@ message ThriftProxy { repeated ThriftFilter thrift_filters = 5; // If set to true, Envoy will try to skip decode data after metadata in the Thrift message. - // This mode will only work if the upstream and downstream protocols are the same and the transport - // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will + // This mode will only work if the upstream and downstream protocols are the same and the transports + // are Framed or Header, and the protocol is not Twitter. Otherwise Envoy will // fallback to decode the data. 
bool payload_passthrough = 6; diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD rename to api/envoy/extensions/filters/udp/dns_filter/v3/BUILD diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto similarity index 96% rename from api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto rename to api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto index 39f44724c430f..63542bdadc7fd 100644 --- a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.udp.dns_filter.v3alpha; +package envoy.extensions.filters.udp.dns_filter.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; @@ -13,10 +13,9 @@ import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3"; option java_outer_classname = "DnsFilterProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: DNS Filter] diff --git a/api/envoy/extensions/key_value/file_based/v3/BUILD b/api/envoy/extensions/key_value/file_based/v3/BUILD index ee92fb652582e..ec1e778e06e5c 100644 --- a/api/envoy/extensions/key_value/file_based/v3/BUILD +++ b/api/envoy/extensions/key_value/file_based/v3/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # 
Apache 2 api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], + deps = [ + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", + ], ) diff --git a/api/envoy/extensions/key_value/file_based/v3/config.proto b/api/envoy/extensions/key_value/file_based/v3/config.proto index 0eff4feb8f941..82aa94f8cb648 100644 --- a/api/envoy/extensions/key_value/file_based/v3/config.proto +++ b/api/envoy/extensions/key_value/file_based/v3/config.proto @@ -4,6 +4,8 @@ package envoy.extensions.key_value.file_based.v3; import "google/protobuf/duration.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -14,10 +16,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: File Based Key Value Store storage plugin] -// [#alpha:] // [#extension: envoy.key_value.file_based] // This is configuration to flush a key value store out to disk. message FileBasedKeyValueStoreConfig { + option (xds.annotations.v3.message_status).work_in_progress = true; + // The filename to read the keys and values from, and write the keys and // values to. 
string filename = 1 [(validate.rules).string = {min_len: 1}]; diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD b/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD rename to api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/BUILD diff --git a/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto b/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto new file mode 100644 index 0000000000000..a4bdc73fa81a0 --- /dev/null +++ b/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.extensions.rbac.matchers.upstream_ip_port.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.rbac.matchers.upstream_ip_port.v3"; +option java_outer_classname = "UpstreamIpPortMatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: RBAC upstream IP and port matcher plugin] +// [#extension: envoy.rbac.matchers.upstream_ip_port] + +// This is configuration for matching upstream ip and port. +// Note that although both fields are optional, at least one of IP or port must be supplied. If only +// one is supplied the other is a wildcard match. +// This matcher requires a filter in the chain to have saved the upstream address in the +// filter state before the matcher is executed by RBAC filter. The state should be saved with key +// `envoy.stream.upstream_address` (See +// :repo:`upstream_address.h`). 
+// Also, see :repo:`proxy_filter.cc< +// source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc>` for an example of a +// filter which populates the FilterState. +message UpstreamIpPortMatcher { +  // A CIDR block that will be used to match the upstream IP. +  // Both Ipv4 and Ipv6 ranges can be matched. +  config.core.v3.CidrRange upstream_ip = 1; + +  // A port range that will be used to match the upstream port. +  type.v3.Int64Range upstream_port_range = 2; +} diff --git a/api/envoy/watchdog/v3alpha/BUILD b/api/envoy/extensions/transport_sockets/s2a/v3/BUILD similarity index 100% rename from api/envoy/watchdog/v3alpha/BUILD rename to api/envoy/extensions/transport_sockets/s2a/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto b/api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto similarity index 83% rename from generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto rename to api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto index b32b84653e690..7c77222f59d63 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto +++ b/api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto @@ -1,14 +1,13 @@ syntax = "proto3"; -package envoy.extensions.transport_sockets.s2a.v3alpha; +package envoy.extensions.transport_sockets.s2a.v3; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3"; option java_outer_classname = "S2aProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#not-implemented-hide:] diff --git a/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto b/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto deleted file 
mode 100644 index b32b84653e690..0000000000000 --- a/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.s2a.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3alpha"; -option java_outer_classname = "S2aProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#not-implemented-hide:] -// Configuration for S2A transport socket. This allows Envoy clients to -// configure how to offload mTLS handshakes to the S2A service. -// https://github.com/google/s2a-core#readme -message S2AConfiguration { - // The address of the S2A. This can be an IP address or a hostname, - // followed by a port number. - string s2a_address = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto index 271dcfbe49cec..1267488d98c6a 100644 --- a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto @@ -118,12 +118,13 @@ message HttpProtocolOptions { // is alpha is not guaranteed to be API-stable. config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - // [#not-implemented-hide:] // The presence of alternate protocols cache options causes the use of the // alternate protocols cache, which is responsible for parsing and caching // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that // advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled. + // + // .. note:: + // This is required when HTTP/3 is enabled. 
config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4; } diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD b/api/envoy/extensions/watchdog/profile_action/v3/BUILD similarity index 100% rename from generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD rename to api/envoy/extensions/watchdog/profile_action/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto similarity index 87% rename from generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto rename to api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto index d73f0b5dfb9c5..07c3907fbd61a 100644 --- a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto +++ b/api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto @@ -1,16 +1,15 @@ syntax = "proto3"; -package envoy.extensions.watchdog.profile_action.v3alpha; +package envoy.extensions.watchdog.profile_action.v3; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3"; option java_outer_classname = "ProfileActionProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Watchdog Action that does CPU profiling.] 
diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto deleted file mode 100644 index d73f0b5dfb9c5..0000000000000 --- a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.watchdog.profile_action.v3alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha"; -option java_outer_classname = "ProfileActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Watchdog Action that does CPU profiling.] -// [#extension: envoy.watchdog.profile_action] - -// Configuration for the profile watchdog action. -message ProfileActionConfig { - // How long the profile should last. If not set defaults to 5 seconds. - google.protobuf.Duration profile_duration = 1; - - // File path to the directory to output profiles. - string profile_path = 2 [(validate.rules).string = {min_len: 1}]; - - // Limits the max number of profiles that can be generated by this action - // over its lifetime to avoid filling the disk. - // If not set (i.e. it's 0), a default of 10 will be used. 
- uint64 max_profiles = 3; -} diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index b627fcb314751..11fc057da888a 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -12,7 +12,6 @@ import "google/rpc/status.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "ExternalAuthProto"; @@ -46,9 +45,9 @@ message DeniedHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.DeniedHttpResponse"; - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; + // This field allows the authorization service to send an HTTP response status code to the + // downstream client. If not set, Envoy sends ``403 Forbidden`` HTTP status code by default. + type.v3.HttpStatus status = 1; // This field allows the authorization service to send HTTP response headers // to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to @@ -61,7 +60,7 @@ message DeniedHttpResponse { } // HTTP attributes for an OK response. -// [#next-free-field: 7] +// [#next-free-field: 9] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; @@ -103,6 +102,15 @@ message OkHttpResponse { // to the downstream client on success. Note that the :ref:`append field in HeaderValueOption ` // defaults to false when used in this message. 
repeated config.core.v3.HeaderValueOption response_headers_to_add = 6; + + // This field allows the authorization service to set (and overwrite) query + // string parameters on the original request before it is sent upstream. + repeated config.core.v3.QueryParameter query_parameters_to_set = 7; + + // This field allows the authorization service to specify which query parameters + // should be removed from the original request before it is sent upstream. Each + // element in this list is a case-sensitive query parameter name to be removed. + repeated string query_parameters_to_remove = 8; } // Intended for gRPC and Network Authorization servers `only`. @@ -110,7 +118,9 @@ message CheckResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckResponse"; - // Status `OK` allows the request. Any other status indicates the request should be denied. + // Status `OK` allows the request. Any other status indicates the request should be denied, and + // for HTTP filter, if not overridden by :ref:`denied HTTP response status ` + // Envoy sends ``403 Forbidden`` HTTP status code by default. google.rpc.Status status = 1; // An message that contains HTTP response attributes. 
This message is diff --git a/generated_api_shadow/envoy/service/auth/v3/BUILD b/api/envoy/service/ext_proc/v3/BUILD similarity index 76% rename from generated_api_shadow/envoy/service/auth/v3/BUILD rename to api/envoy/service/ext_proc/v3/BUILD index 0774dda23e421..d4506b16ed5d2 100644 --- a/generated_api_shadow/envoy/service/auth/v3/BUILD +++ b/api/envoy/service/ext_proc/v3/BUILD @@ -7,10 +7,10 @@ licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ - "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/service/auth/v2:pkg", + "//envoy/extensions/filters/http/ext_proc/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto b/api/envoy/service/ext_proc/v3/external_processor.proto similarity index 97% rename from generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto rename to api/envoy/service/ext_proc/v3/external_processor.proto index 09572331aa42a..dc6b527d5bcc9 100644 --- a/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto +++ b/api/envoy/service/ext_proc/v3/external_processor.proto @@ -1,22 +1,24 @@ syntax = "proto3"; -package envoy.service.ext_proc.v3alpha; +package envoy.service.ext_proc.v3; import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; +import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto"; import "envoy/type/v3/http_status.proto"; import "google/protobuf/struct.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.service.ext_proc.v3alpha"; +option java_package = "io.envoyproxy.envoy.service.ext_proc.v3"; option java_outer_classname = "ExternalProcessorProto"; option java_multiple_files = true; 
option java_generic_services = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: External Processing Service] @@ -167,7 +169,7 @@ message ProcessingResponse { // for the duration of this particular request/response only. Servers // may use this to intelligently control how requests are processed // based on the headers and other metadata that they see. - envoy.extensions.filters.http.ext_proc.v3alpha.ProcessingMode mode_override = 9; + envoy.extensions.filters.http.ext_proc.v3.ProcessingMode mode_override = 9; } // The following are messages that are sent to the server. diff --git a/api/envoy/service/ext_proc/v3alpha/BUILD b/api/envoy/service/ext_proc/v3alpha/BUILD deleted file mode 100644 index 4f3730e2af32e..0000000000000 --- a/api/envoy/service/ext_proc/v3alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/ext_proc/v3alpha/external_processor.proto b/api/envoy/service/ext_proc/v3alpha/external_processor.proto deleted file mode 100644 index 09572331aa42a..0000000000000 --- a/api/envoy/service/ext_proc/v3alpha/external_processor.proto +++ /dev/null @@ -1,331 +0,0 @@ -syntax = "proto3"; - -package envoy.service.ext_proc.v3alpha; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; -import "envoy/type/v3/http_status.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.ext_proc.v3alpha"; -option java_outer_classname = "ExternalProcessorProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Processing Service] - -// A service that can access and modify HTTP requests and responses -// as part of a filter chain. -// The overall external processing protocol works like this: -// -// 1. Envoy sends to the service information about the HTTP request. -// 2. The service sends back a ProcessingResponse message that directs Envoy -// to either stop processing, continue without it, or send it the -// next chunk of the message body. -// 3. If so requested, Envoy sends the server chunks of the message body, -// or the entire body at once. In either case, the server sends back -// a ProcessingResponse after each message it receives. -// 4. 
If so requested, Envoy sends the server the HTTP trailers, -// and the server sends back a ProcessingResponse. -// 5. At this point, request processing is done, and we pick up again -// at step 1 when Envoy receives a response from the upstream server. -// 6. At any point above, if the server closes the gRPC stream cleanly, -// then Envoy proceeds without consulting the server. -// 7. At any point above, if the server closes the gRPC stream with an error, -// then Envoy returns a 500 error to the client, unless the filter -// was configured to ignore errors. -// -// In other words, the process is a request/response conversation, but -// using a gRPC stream to make it easier for the server to -// maintain state. - -service ExternalProcessor { - // This begins the bidirectional stream that Envoy will use to - // give the server control over what the filter does. The actual - // protocol is described by the ProcessingRequest and ProcessingResponse - // messages below. - rpc Process(stream ProcessingRequest) returns (stream ProcessingResponse) { - } -} - -// This represents the different types of messages that Envoy can send -// to an external processing server. -// [#next-free-field: 8] -message ProcessingRequest { - // Specify whether the filter that sent this request is running in synchronous - // or asynchronous mode. The choice of synchronous or asynchronous mode - // can be set in the filter configuration, and defaults to false. - // - // * A value of "false" indicates that the server must respond - // to this message by either sending back a matching ProcessingResponse message, - // or by closing the stream. - // * A value of "true" indicates that the server must not respond to this - // message, although it may still close the stream to indicate that no more messages - // are needed. - // - bool async_mode = 1; - - // Each request message will include one of the following sub-messages. 
Which - // ones are set for a particular HTTP request/response depend on the - // processing mode. - oneof request { - option (validate.required) = true; - - // Information about the HTTP request headers, as well as peer info and additional - // properties. Unless "async_mode" is true, the server must send back a - // HeaderResponse message, an ImmediateResponse message, or close the stream. - HttpHeaders request_headers = 2; - - // Information about the HTTP response headers, as well as peer info and additional - // properties. Unless "async_mode" is true, the server must send back a - // HeaderResponse message or close the stream. - HttpHeaders response_headers = 3; - - // A chunk of the HTTP request body. Unless "async_mode" is true, the server must send back - // a BodyResponse message, an ImmediateResponse message, or close the stream. - HttpBody request_body = 4; - - // A chunk of the HTTP request body. Unless "async_mode" is true, the server must send back - // a BodyResponse message or close the stream. - HttpBody response_body = 5; - - // The HTTP trailers for the request path. Unless "async_mode" is true, the server - // must send back a TrailerResponse message or close the stream. - // - // This message is only sent if the trailers processing mode is set to "SEND". - // If there are no trailers on the original downstream request, then this message - // will only be sent (with empty trailers waiting to be populated) if the - // processing mode is set before the request headers are sent, such as - // in the filter configuration. - HttpTrailers request_trailers = 6; - - // The HTTP trailers for the response path. Unless "async_mode" is true, the server - // must send back a TrailerResponse message or close the stream. - // - // This message is only sent if the trailers processing mode is set to "SEND". 
- // If there are no trailers on the original downstream request, then this message - // will only be sent (with empty trailers waiting to be populated) if the - // processing mode is set before the request headers are sent, such as - // in the filter configuration. - HttpTrailers response_trailers = 7; - } -} - -// For every ProcessingRequest received by the server with the "async_mode" field -// set to false, the server must send back exactly one ProcessingResponse message. -// [#next-free-field: 10] -message ProcessingResponse { - oneof response { - option (validate.required) = true; - - // The server must send back this message in response to a message with the - // "request_headers" field set. - HeadersResponse request_headers = 1; - - // The server must send back this message in response to a message with the - // "response_headers" field set. - HeadersResponse response_headers = 2; - - // The server must send back this message in response to a message with - // the "request_body" field set. - BodyResponse request_body = 3; - - // The server must send back this message in response to a message with - // the "response_body" field set. - BodyResponse response_body = 4; - - // The server must send back this message in response to a message with - // the "request_trailers" field set. - TrailersResponse request_trailers = 5; - - // The server must send back this message in response to a message with - // the "response_trailers" field set. - TrailersResponse response_trailers = 6; - - // If specified, attempt to create a locally generated response, send it - // downstream, and stop processing additional filters and ignore any - // additional messages received from the remote server for this request or - // response. If a response has already started -- for example, if this - // message is sent response to a "response_body" message -- then - // this will either ship the reply directly to the downstream codec, - // or reset the stream. 
- ImmediateResponse immediate_response = 7; - } - - // [#not-implemented-hide:] - // Optional metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata will be placed in the namespace "envoy.filters.http.ext_proc". - google.protobuf.Struct dynamic_metadata = 8; - - // Override how parts of the HTTP request and response are processed - // for the duration of this particular request/response only. Servers - // may use this to intelligently control how requests are processed - // based on the headers and other metadata that they see. - envoy.extensions.filters.http.ext_proc.v3alpha.ProcessingMode mode_override = 9; -} - -// The following are messages that are sent to the server. - -// This message is sent to the external server when the HTTP request and responses -// are first received. -message HttpHeaders { - // The HTTP request headers. All header keys will be - // lower-cased, because HTTP header keys are case-insensitive. - config.core.v3.HeaderMap headers = 1; - - // [#not-implemented-hide:] - // The values of properties selected by the "request_attributes" - // or "response_attributes" list in the configuration. Each entry - // in the list is populated - // from the standard :ref:`attributes ` - // supported across Envoy. - map attributes = 2; - - // If true, then there is no message body associated with this - // request or response. - bool end_of_stream = 3; -} - -// This message contains the message body that Envoy sends to the external server. -message HttpBody { - bytes body = 1; - - bool end_of_stream = 2; -} - -// This message contains the trailers. -message HttpTrailers { - config.core.v3.HeaderMap trailers = 1; -} - -// The following are messages that may be sent back by the server. - -// This message must be sent in response to an HttpHeaders message. -message HeadersResponse { - CommonResponse response = 1; -} - -// This message must be sent in response to an HttpTrailers message. 
-message TrailersResponse { - // Instructions on how to manipulate the trailers - HeaderMutation header_mutation = 1; -} - -// This message must be sent in response to an HttpBody message. -message BodyResponse { - CommonResponse response = 1; -} - -// This message contains common fields between header and body responses. -// [#next-free-field: 6] -message CommonResponse { - enum ResponseStatus { - // Apply the mutation instructions in this message to the - // request or response, and then continue processing the filter - // stream as normal. This is the default. - CONTINUE = 0; - - // Apply the specified header mutation, replace the body with the body - // specified in the body mutation (if present), and do not send any - // further messages for this request or response even if the processing - // mode is configured to do so. - // - // When used in response to a request_headers or response_headers message, - // this status makes it possible to either completely replace the body - // while discarding the original body, or to add a body to a message that - // formerly did not have one. - // - // In other words, this response makes it possible to turn an HTTP GET - // into a POST, PUT, or PATCH. - CONTINUE_AND_REPLACE = 1; - } - - // If set, provide additional direction on how the Envoy proxy should - // handle the rest of the HTTP filter chain. - ResponseStatus status = 1 [(validate.rules).enum = {defined_only: true}]; - - // Instructions on how to manipulate the headers. When responding to an - // HttpBody request, header mutations will only take effect if - // the current processing mode for the body is BUFFERED. - HeaderMutation header_mutation = 2; - - // Replace the body of the last message sent to the remote server on this - // stream. If responding to an HttpBody request, simply replace or clear - // the body chunk that was sent with that request. Body mutations only take - // effect in response to "body" messages and are ignored otherwise. 
- BodyMutation body_mutation = 3; - - // [#not-implemented-hide:] - // Add new trailers to the message. This may be used when responding to either a - // HttpHeaders or HttpBody message, but only if this message is returned - // along with the CONTINUE_AND_REPLACE status. - config.core.v3.HeaderMap trailers = 4; - - // Clear the route cache for the current request. - // This is necessary if the remote server - // modified headers that are used to calculate the route. - bool clear_route_cache = 5; -} - -// This message causes the filter to attempt to create a locally -// generated response, send it downstream, stop processing -// additional filters, and ignore any additional messages received -// from the remote server for this request or response. If a response -// has already started, then this will either ship the reply directly -// to the downstream codec, or reset the stream. -// [#next-free-field: 6] -message ImmediateResponse { - // The response code to return - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // Apply changes to the default headers, which will include content-type. - HeaderMutation headers = 2; - - // The message body to return with the response which is sent using the - // text/plain content type, or encoded in the grpc-message header. - string body = 3; - - // If set, then include a gRPC status trailer. - GrpcStatus grpc_status = 4; - - // A string detailing why this local reply was sent, which may be included - // in log and debug output. - string details = 5; -} - -// This message specifies a gRPC status for an ImmediateResponse message. -message GrpcStatus { - // The actual gRPC status - uint32 status = 1; -} - -// Change HTTP headers or trailers by appending, replacing, or removing -// headers. -message HeaderMutation { - // Add or replace HTTP headers. 
Attempts to set the value of - // any "x-envoy" header, and attempts to set the ":method", - // ":authority", ":scheme", or "host" headers will be ignored. - repeated config.core.v3.HeaderValueOption set_headers = 1; - - // Remove these HTTP headers. Attempts to remove system headers -- - // any header starting with ":", plus "host" -- will be ignored. - repeated string remove_headers = 2; -} - -// Replace the entire message body chunk received in the corresponding -// HttpBody message with this new body, or clear the body. -message BodyMutation { - oneof mutation { - // The entire body to replace - bytes body = 1; - - // Clear the corresponding body chunk - bool clear_body = 2; - } -} diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto index ab8e0ffc0eba7..113998c4082de 100644 --- a/api/envoy/service/ratelimit/v3/rls.proto +++ b/api/envoy/service/ratelimit/v3/rls.proto @@ -53,7 +53,7 @@ message RateLimitRequest { } // A response from a ShouldRateLimit call. -// [#next-free-field: 7] +// [#next-free-field: 8] message RateLimitResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse"; @@ -103,8 +103,15 @@ message RateLimitResponse { Unit unit = 2; } - // Cacheable quota for responses, see documentation for the :ref:`quota - // ` field. + // Cacheable quota for responses. + // Quota can be granted at different levels: either for each individual descriptor or for the whole descriptor set. + // This is a certain number of requests over a period of time. + // The client may cache this result and apply the effective RateLimitResponse to future matching + // requests without querying rate limit service. + // + // When quota expires due to timeout, a new RLS request will also be made. + // The implementation may choose to preemptively query the rate limit server for more quota on or + // before expiration or before the available quota runs out. 
// [#not-implemented-hide:] message Quota { // Number of matching requests granted in quota. Must be 1 or more. @@ -114,6 +121,15 @@ message RateLimitResponse { // Point in time at which the quota expires. google.protobuf.Timestamp valid_until = 2; } + + // The unique id that is associated with each Quota either at individual descriptor level or whole descriptor set level. + // + // For a matching policy with boolean logic, for example, match: "request.headers['environment'] == 'staging' || request.headers['environment'] == 'dev'"), + // the request_headers action produces a distinct list of descriptors for each possible value of the ‘environment’ header even though the granted quota is same. + // Thus, the client will use this id information (returned from RLS server) to correctly correlate the multiple descriptors/descriptor sets that have been granted with same quota (i.e., share the same quota among multiple descriptors or descriptor sets.) + // + // If id is empty, this id field will be ignored. If quota for the same id changes (e.g. due to configuration update), the old quota will be overridden by the new one. Shared quotas referenced by ID will still adhere to expiration after `valid_until`. + string id = 3; } // [#next-free-field: 6] @@ -133,12 +149,9 @@ message RateLimitResponse { // Duration until reset of the current limit window. google.protobuf.Duration duration_until_reset = 4; - // Quota granted for the descriptor. This is a certain number of requests over a period of time. - // The client may cache this result and apply the effective RateLimitResponse to future matching - // requests containing a matching descriptor without querying rate limit service. - // // Quota is available for a request if its descriptor set has cached quota available for all // descriptors. + // This is for each individual descriptor in the descriptor set. The client will perform matches for each individual descriptor against available per-descriptor quota. 
// // If quota is available, a RLS request will not be made and the quota will be reduced by 1 for // all matching descriptors. @@ -159,10 +172,6 @@ message RateLimitResponse { // If the server did not provide a quota, such as the quota message is empty for some of // the descriptors, then the request admission is determined by the // :ref:`overall_code `. - // - // When quota expires due to timeout, a new RLS request will also be made. - // The implementation may choose to preemptively query the rate limit server for more quota on or - // before expiration or before the available quota runs out. // [#not-implemented-hide:] Quota quota = 5; } @@ -193,4 +202,17 @@ message RateLimitResponse { // - :ref:`envoy.filters.network.ratelimit ` for network filter. // - :ref:`envoy.filters.thrift.rate_limit ` for Thrift filter. google.protobuf.Struct dynamic_metadata = 6; + + // Quota is available for a request if its entire descriptor set has cached quota available. + // This is a union of all descriptors in the descriptor set. Clients can use the quota for future matches if and only if the descriptor set matches what was sent in the request that originated this response. + // + // If quota is available, a RLS request will not be made and the quota will be reduced by 1. + // If quota is not available (i.e., a cached entry doesn't exist for a RLS descriptor set), a RLS request will be triggered. + // If the server did not provide a quota, such as the quota message is empty then the request admission is determined by the + // :ref:`overall_code `. + // + // If there is not sufficient quota and the cached entry exists for a RLS descriptor set is out-of-quota but not expired, + // the request will be treated as OVER_LIMIT. 
+ // [#not-implemented-hide:] + Quota quota = 7; } diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD b/api/envoy/watchdog/v3/BUILD similarity index 100% rename from generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD rename to api/envoy/watchdog/v3/BUILD diff --git a/api/envoy/watchdog/v3alpha/README.md b/api/envoy/watchdog/v3/README.md similarity index 100% rename from api/envoy/watchdog/v3alpha/README.md rename to api/envoy/watchdog/v3/README.md diff --git a/api/envoy/watchdog/v3alpha/abort_action.proto b/api/envoy/watchdog/v3/abort_action.proto similarity index 85% rename from api/envoy/watchdog/v3alpha/abort_action.proto rename to api/envoy/watchdog/v3/abort_action.proto index d6f34aa892cdb..325c3d3dc7a85 100644 --- a/api/envoy/watchdog/v3alpha/abort_action.proto +++ b/api/envoy/watchdog/v3/abort_action.proto @@ -1,15 +1,14 @@ syntax = "proto3"; -package envoy.watchdog.v3alpha; +package envoy.watchdog.v3; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; -option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; +option java_package = "io.envoyproxy.envoy.watchdog.v3"; option java_outer_classname = "AbortActionProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] 
diff --git a/api/tools/generate_listeners_test.py b/api/tools/generate_listeners_test.py index f67ef4bbb5aab..1defb3f666986 100644 --- a/api/tools/generate_listeners_test.py +++ b/api/tools/generate_listeners_test.py @@ -5,7 +5,7 @@ import generate_listeners if __name__ == "__main__": - srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api_canonical') + srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api') generate_listeners.generate_listeners( os.path.join(srcdir, "examples/service_envoy/listeners.pb"), "/dev/stdout", "/dev/stdout", iter([os.path.join(srcdir, "examples/service_envoy/http_connection_manager.pb")])) diff --git a/api/tools/tap2pcap.py b/api/tools/tap2pcap.py index 93a8610399285..bcb13fdf9a093 100644 --- a/api/tools/tap2pcap.py +++ b/api/tools/tap2pcap.py @@ -8,7 +8,7 @@ Usage: -bazel run @envoy_api_canonical//tools:tap2pcap +bazel run @envoy_api//tools:tap2pcap Known issues: - IPv6 PCAP generation has malformed TCP packets. This appears to be a text2pcap diff --git a/api/tools/tap2pcap_test.py b/api/tools/tap2pcap_test.py index fd13cf32ff694..c0151846f5e18 100644 --- a/api/tools/tap2pcap_test.py +++ b/api/tools/tap2pcap_test.py @@ -11,7 +11,7 @@ # a golden output file for the tshark dump. Since we run tap2pcap in a # subshell with a limited environment, the inferred time zone should be UTC. 
if __name__ == '__main__': - srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api_canonical') + srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api') tap_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.pb_text') expected_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.txt') pcap_path = os.path.join(os.getenv('TEST_TMPDIR'), 'generated.pcap') diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 61af4c4764680..8febd11d209e2 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -16,10 +16,14 @@ proto_library( "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", + "//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg", + "//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/key_value/v3:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", @@ -45,15 +49,14 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", + "//envoy/extensions/access_loggers/open_telemetry/v3:pkg", "//envoy/extensions/access_loggers/stream/v3:pkg", "//envoy/extensions/access_loggers/wasm/v3:pkg", - "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg", + "//envoy/extensions/cache/simple_http_cache/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - 
"//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", @@ -65,14 +68,14 @@ proto_library( "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/common/matcher/action/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", - "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", + "//envoy/extensions/filters/http/admission_control/v3:pkg", "//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", - "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg", + "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", + "//envoy/extensions/filters/http/cache/v3:pkg", + "//envoy/extensions/filters/http/cdn_loop/v3:pkg", "//envoy/extensions/filters/http/composite/v3:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", @@ -81,7 +84,7 @@ proto_library( "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", + "//envoy/extensions/filters/http/ext_proc/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", @@ -96,7 +99,7 @@ proto_library( "//envoy/extensions/filters/http/kill_request/v3:pkg", "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", + 
"//envoy/extensions/filters/http/oauth2/v3:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", "//envoy/extensions/filters/http/original_src/v3:pkg", "//envoy/extensions/filters/http/ratelimit/v3:pkg", @@ -124,14 +127,14 @@ proto_library( "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", + "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", @@ -150,6 +153,7 @@ proto_library( "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", "//envoy/extensions/rate_limit_descriptors/expr/v3:pkg", + "//envoy/extensions/rbac/matchers/upstream_ip_port/v3:pkg", "//envoy/extensions/request_id/uuid/v3:pkg", "//envoy/extensions/resource_monitors/fixed_heap/v3:pkg", "//envoy/extensions/resource_monitors/injected_resource/v3:pkg", @@ -163,7 +167,7 @@ proto_library( "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg", + "//envoy/extensions/transport_sockets/s2a/v3:pkg", "//envoy/extensions/transport_sockets/starttls/v3:pkg", 
"//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", @@ -173,14 +177,14 @@ proto_library( "//envoy/extensions/upstreams/http/v3:pkg", "//envoy/extensions/upstreams/tcp/generic/v3:pkg", "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", + "//envoy/extensions/watchdog/profile_action/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/ext_proc/v3alpha:pkg", + "//envoy/service/ext_proc/v3:pkg", "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", @@ -198,7 +202,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", - "//envoy/watchdog/v3alpha:pkg", + "//envoy/watchdog/v3:pkg", ], ) diff --git a/bazel/BUILD b/bazel/BUILD index 303ab531bead3..3b22ffc8ff878 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -1,8 +1,10 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") +load("//bazel:utils.bzl", "json_data") load("@bazel_skylib//lib:selects.bzl", "selects") load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") licenses(["notice"]) # Apache 2 @@ -591,3 +593,8 @@ alias( name = "windows", actual = "@bazel_tools//src/conditions:windows", ) + +json_data( + name = "repository_locations", + data = REPOSITORY_LOCATIONS_SPEC, +) diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 34fc92b21f123..9820ff4cf993d 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -88,7 +88,7 @@ The name of the dependency can be found in [the repository locations 
file.](https://github.com/envoyproxy/envoy/blob/main/bazel/repository_locations.bzl) The path of the local copy has to be absolute path. -For repositories built by `envoy_cmake_external()` in `bazel/foreign_cc/BUILD`, +For repositories built by `envoy_cmake()` in `bazel/foreign_cc/BUILD`, it is necessary to populate the local copy with some additional Bazel machinery to support `--override_repository`: 1. Place an empty `WORKSPACE` in the root. diff --git a/bazel/README.md b/bazel/README.md index 3828e675a0b37..9337efb33ca81 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -20,7 +20,7 @@ On Windows, run the following commands: ```cmd mkdir %USERPROFILE%\bazel powershell Invoke-WebRequest https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-windows-amd64.exe -OutFile %USERPROFILE%\bazel\bazel.exe -set PATH=%PATH%;%USERPROFILE%\bazel +set PATH=%USERPROFILE%\bazel;%PATH% ``` ## Production environments @@ -30,7 +30,7 @@ dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#require independently sourced, the following steps should be followed: 1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements). -1. `bazel build -c opt //source/exe:envoy-static` from the repository root. +1. `bazel build -c opt envoy` from the repository root. ## Quick start Bazel build for developers @@ -154,8 +154,8 @@ for how to update or override dependencies. package. ```cmd mklink %USERPROFILE%\Python39\python3.exe %USERPROFILE%\Python39\python.exe - set PATH=%PATH%;%USERPROFILE%\Python39 - set PATH=%PATH%;%USERPROFILE%\Python39\Scripts + set PATH=%USERPROFILE%\Python39;%PATH% + set PATH=%USERPROFILE%\Python39\Scripts;%PATH% pip install wheel ``` @@ -169,7 +169,7 @@ for how to update or override dependencies. which is determined by their relative ordering in your PATH. 
```cmd set BAZEL_VC=%USERPROFILE%\VSBT2019\VC - set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64 + set PATH=%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64;%PATH% ``` The Windows SDK contains header files and libraries you need when building Windows applications. Bazel always uses the latest, but you can specify a different version by setting the environment variable `BAZEL_WINSDK_FULL_VERSION`. See [bazel/windows](https://docs.bazel.build/versions/master/windows.html) @@ -179,8 +179,8 @@ for how to update or override dependencies. the project's GCP CI remote build environment, so 64 bit builds from the CMake and ninja projects are used instead. ```cmd - set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin - set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja + set PATH=%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin;%PATH% + set PATH=%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja;%PATH% ``` [MSYS2 shell](https://msys2.github.io/): Install to a path with no spaces, e.g. C:\msys64. @@ -189,7 +189,7 @@ for how to update or override dependencies. executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell behaves as expected. ```cmd - set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin + set PATH=%USERPROFILE%\msys64\usr\bin;%PATH% set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe set MSYS2_ARG_CONV_EXCL=* set MSYS2_PATH_TYPE=inherit @@ -216,7 +216,7 @@ for how to update or override dependencies. [Git](https://git-scm.com/downloads): This version from the Git project, or the version distributed using pacman under MSYS2 will both work, ensure one is on the PATH:. 
```cmd - set PATH=%PATH%;%USERPROFILE%\Git\bin + set PATH=%USERPROFILE%\Git\bin;%PATH% ``` Lastly, persist environment variable changes. @@ -236,7 +236,7 @@ for how to update or override dependencies. in your shell for buildifier to work. 1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer` in your shell for buildozer to work. -1. `bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or +1. `bazel build envoy` from the Envoy source directory. Add `-c opt` for an optimized release build or `-c dbg` for an unoptimized, fully instrumented debugging build. ## Building Envoy with the CI Docker image @@ -270,7 +270,7 @@ To build Envoy with a remote build services, run Bazel with your remote build se For example the following command runs build with the GCP RBE service used in CI: ``` -bazel build //source/exe:envoy-static --config=remote-clang \ +bazel build envoy --config=remote-clang \ --remote_cache=grpcs://remotebuildexecution.googleapis.com \ --remote_executor=grpcs://remotebuildexecution.googleapis.com \ --remote_instance_name=projects/envoy-ci/instances/default_instance @@ -289,7 +289,7 @@ Building Envoy with Docker sandbox uses the same Docker image used in CI with fi output which is not depending on your local C++ toolchain. It can also help debugging issues with RBE. To build Envoy with Docker sandbox: ``` -bazel build //source/exe:envoy-static --config=docker-clang +bazel build envoy --config=docker-clang ``` Tests can be run in docker sandbox too. Note that the network environment, such as IPv6, may be different in the docker sandbox so you may want @@ -299,7 +299,7 @@ set different options. See below to configure test IP versions. 
To link Envoy against libc++, follow the [quick start](#quick-start-bazel-build-for-developers) to setup Clang+LLVM and run: ``` -bazel build --config=libc++ //source/exe:envoy-static +bazel build --config=libc++ envoy ``` Or use our configuration with Remote Execution or Docker sandbox, pass `--config=remote-clang-libc++` or @@ -522,14 +522,14 @@ that Bazel supports: You can use the `-c ` flag to control this, e.g. ``` -bazel build -c opt //source/exe:envoy-static +bazel build -c opt envoy ``` To override the compilation mode and optimize the build for binary size, you can use the `sizeopt` configuration: ``` -bazel build //source/exe:envoy-static --config=sizeopt +bazel build envoy --config=sizeopt ``` ## Sanitizers @@ -751,7 +751,7 @@ They should also ignore any local `.bazelrc` for reproducibility. This can be achieved with: ``` -bazel --bazelrc=/dev/null build -c opt //source/exe:envoy-static.stripped +bazel --bazelrc=/dev/null build -c opt envoy.stripped ``` One caveat to note is that the Git SHA1 is truncated to 16 bytes today as a @@ -818,7 +818,7 @@ resources, you can override Bazel's default job parallelism determination with `--jobs=N` to restrict the build to at most `N` simultaneous jobs, e.g.: ``` -bazel build --jobs=2 //source/exe:envoy-static +bazel build --jobs=2 envoy ``` # Debugging the Bazel build @@ -827,19 +827,19 @@ When trying to understand what Bazel is doing, the `-s` and `--explain` options are useful. 
To have Bazel provide verbose output on which commands it is executing: ``` -bazel build -s //source/exe:envoy-static +bazel build -s envoy ``` To have Bazel emit to a text file the rationale for rebuilding a target: ``` -bazel build --explain=file.txt //source/exe:envoy-static +bazel build --explain=file.txt envoy ``` To get more verbose explanations: ``` -bazel build --explain=file.txt --verbose_explanations //source/exe:envoy-static +bazel build --explain=file.txt --verbose_explanations envoy ``` # Resolving paths in bazel build output diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl index 362e1803a1ef8..97718ef5346b9 100644 --- a/bazel/api_binding.bzl +++ b/bazel/api_binding.bzl @@ -13,6 +13,7 @@ def _default_envoy_api_impl(ctx): ] for d in api_dirs: ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d) + ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child("bazel").get_child("utils.bzl"), "utils.bzl") _default_envoy_api = repository_rule( implementation = _default_envoy_api_impl, @@ -24,14 +25,9 @@ _default_envoy_api = repository_rule( def envoy_api_binding(): # Treat the data plane API as an external repo, this simplifies exporting - # the API to https://github.com/envoyproxy/data-plane-api. This is the - # shadow API for Envoy internal use, see #9479. + # the API to https://github.com/envoyproxy/data-plane-api. if "envoy_api" not in native.existing_rules().keys(): - _default_envoy_api(name = "envoy_api", reldir = "generated_api_shadow") - - # We also provide the non-shadowed API for developer use (see #9479). 
- if "envoy_api_raw" not in native.existing_rules().keys(): - _default_envoy_api(name = "envoy_api_canonical", reldir = "api") + _default_envoy_api(name = "envoy_api", reldir = "api") # TODO(https://github.com/envoyproxy/envoy/issues/7719) need to remove both bindings and use canonical rules native.bind( diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 7c806b08c98a6..b382e3fd02cd9 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -1,6 +1,5 @@ -load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") +load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies") load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") -load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config") load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties") load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") @@ -9,15 +8,16 @@ load("@upb//bazel:workspace_deps.bzl", "upb_deps") load("@rules_rust//rust:repositories.bzl", "rust_repositories") load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies") load("@proxy_wasm_rust_sdk//bazel:dependencies.bzl", "proxy_wasm_rust_sdk_dependencies") +load("@rules_cc//cc:repositories.bzl", "rules_cc_dependencies", "rules_cc_toolchains") # go version for rules_go GO_VERSION = "1.15.5" def envoy_dependency_imports(go_version = GO_VERSION): - rules_foreign_cc_dependencies() + # TODO: allow building of tools for easier onboarding + rules_foreign_cc_dependencies(register_default_tools = False, register_built_tools = False) go_rules_dependencies() go_register_toolchains(go_version) - rbe_toolchains_config() gazelle_dependencies() apple_rules_dependencies() rust_repositories() @@ -28,6 +28,8 @@ def 
envoy_dependency_imports(go_version = GO_VERSION): oss_fuzz = True, honggfuzz = False, ) + rules_cc_dependencies() + rules_cc_toolchains() custom_exec_properties( name = "envoy_large_machine_exec_property", diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 4d671ab9562fa..f48ebe70564e9 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -1,6 +1,6 @@ # The main Envoy bazel file. Load this file for all Envoy-specific build macros # and rules that you'd like to use in your BUILD files. -load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external") +load("@rules_foreign_cc//foreign_cc:cmake.bzl", "cmake") load(":envoy_binary.bzl", _envoy_cc_binary = "envoy_cc_binary") load(":envoy_internal.bzl", "envoy_external_dep_path") load( @@ -44,6 +44,7 @@ load( ) load( "@envoy_build_config//:extensions_build_config.bzl", + "CONTRIB_EXTENSION_PACKAGE_VISIBILITY", "EXTENSION_PACKAGE_VISIBILITY", ) load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") @@ -65,7 +66,7 @@ def envoy_extension_package(enabled_default = True, default_visibility = EXTENSI ) def envoy_contrib_package(): - envoy_extension_package(default_visibility = ["//:contrib_library"]) + envoy_extension_package(default_visibility = CONTRIB_EXTENSION_PACKAGE_VISIBILITY) # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. @@ -91,15 +92,12 @@ envoy_directory_genrule = rule( # External CMake C++ library targets should be specified with this function. 
This defaults # to building the dependencies with ninja -def envoy_cmake_external( +def envoy_cmake( name, cache_entries = {}, debug_cache_entries = {}, - cmake_options = ["-GNinja"], - make_commands = ["ninja -v", "ninja -v install"], lib_source = "", postfix_script = "", - static_libraries = [], copy_pdb = False, pdb_name = "", cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles", @@ -127,22 +125,23 @@ def envoy_cmake_external( else: pf = postfix_script - cmake_external( + cmake( name = name, cache_entries = select({ "@envoy//bazel:dbg_build": cache_entries_debug, "//conditions:default": cache_entries, }), - cmake_options = cmake_options, + generate_args = ["-GNinja"], + targets = ["", "install"], + # TODO: Remove install target and make this work + install = False, # TODO(lizan): Make this always true generate_crosstool_file = select({ "@envoy//bazel:windows_x86_64": True, "//conditions:default": generate_crosstool_file, }), lib_source = lib_source, - make_commands = make_commands, postfix_script = pf, - static_libraries = static_libraries, **kwargs ) diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 6f9c9d83e30b3..9c5130f15e4b7 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -51,9 +51,9 @@ def envoy_copts(repository, test = False): # debugging info detailing some 1600 test binaries would be wasteful. # targets listed in order from generic to increasing specificity. # Bazel adds an implicit -DNDEBUG for opt targets. 
- repository + "//bazel:opt_build": [] if test else ["-ggdb3", "-gsplit-dwarf"], + repository + "//bazel:opt_build": [] if test else ["-ggdb3"], repository + "//bazel:fastbuild_build": [], - repository + "//bazel:dbg_build": ["-ggdb3", "-gsplit-dwarf"], + repository + "//bazel:dbg_build": ["-ggdb3"], repository + "//bazel:windows_opt_build": [] if test else ["-Z7"], repository + "//bazel:windows_fastbuild_build": [], repository + "//bazel:windows_dbg_build": [], diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index ac74d1be29c96..5b1d674483c8a 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -12,6 +12,7 @@ load(":envoy_pch.bzl", "envoy_pch_copts") load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") load( "@envoy_build_config//:extensions_build_config.bzl", + "CONTRIB_EXTENSION_PACKAGE_VISIBILITY", "EXTENSION_CONFIG_VISIBILITY", ) @@ -75,7 +76,7 @@ def envoy_cc_contrib_extension( name, tags = [], extra_visibility = [], - visibility = ["//:contrib_library"], + visibility = CONTRIB_EXTENSION_PACKAGE_VISIBILITY, **kwargs): envoy_cc_extension(name, tags, extra_visibility, visibility, **kwargs) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 799e60154afc5..0cd48ba286200 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -169,10 +169,12 @@ def envoy_cc_test( linkstatic = envoy_linkstatic(), malloc = tcmalloc_external_dep(repository), deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [ - repository + "//test:test_pch", repository + "//test:main", repository + "//test/test_common:test_version_linkstamp", - ], + ] + select({ + repository + "//bazel:clang_pch_build": [repository + "//test:test_pch"], + "//conditions:default": [], + }), # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 # 2 - by default, mocks act as StrictMocks. 
args = args + ["--gmock_default_mock_behavior=2"], diff --git a/bazel/external/envoy_build_tools.patch b/bazel/external/envoy_build_tools.patch new file mode 100644 index 0000000000000..33d5362b45f66 --- /dev/null +++ b/bazel/external/envoy_build_tools.patch @@ -0,0 +1,39 @@ +diff --git a/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl b/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl +index 5dbaa86..3c90e3b 100755 +--- a/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl ++++ b/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl +@@ -386,7 +386,7 @@ def _impl(ctx): + ], + flag_groups = [ + flag_group( +- flags = ["-gsplit-dwarf"], ++ flags = ["-gsplit-dwarf", "-g"], + expand_if_available = "per_object_debug_info_file", + ), + ], +diff --git a/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl b/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl +index 5dbaa86..3c90e3b 100755 +--- a/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl ++++ b/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl +@@ -386,7 +386,7 @@ def _impl(ctx): + ], + flag_groups = [ + flag_group( +- flags = ["-gsplit-dwarf"], ++ flags = ["-gsplit-dwarf", "-g"], + expand_if_available = "per_object_debug_info_file", + ), + ], +diff --git a/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl b/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl +index 5dbaa86..3c90e3b 100755 +--- a/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl ++++ b/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl +@@ -386,7 +386,7 @@ def _impl(ctx): + ], + flag_groups = [ + flag_group( +- flags = ["-gsplit-dwarf"], ++ flags = ["-gsplit-dwarf", "-g"], + expand_if_available = "per_object_debug_info_file", + ), + ], diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch index e124821f9acee..8c27b8f327ceb 100644 --- a/bazel/external/googleurl.patch +++ b/bazel/external/googleurl.patch @@ -2,22 +2,22 @@ # 
project using clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974. diff --git a/base/compiler_specific.h b/base/compiler_specific.h -index 0cd36dc..8c4cbd4 100644 +index 6651220..a469c19 100644 --- a/base/compiler_specific.h +++ b/base/compiler_specific.h @@ -7,10 +7,6 @@ - + #include "build/build_config.h" - + -#if defined(COMPILER_MSVC) && !defined(__clang__) -#error "Only clang-cl is supported on Windows, see https://crbug.com/988071" -#endif - - // Annotate a variable indicating it's ok if the variable is not used. - // (Typically used to silence a compiler warning when the assignment - // is important for some other reason.) -@@ -55,8 +51,12 @@ - // prevent code folding, see gurl_base::debug::Alias. + // This is a wrapper around `__has_cpp_attribute`, which can be used to test for + // the presence of an attribute. In case the compiler does not support this + // macro it will simply evaluate to 0. +@@ -75,8 +71,12 @@ + // prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h. // Use like: // void NOT_TAIL_CALLED FooBar(); -#if defined(__clang__) && __has_attribute(not_tail_called) @@ -30,10 +30,10 @@ index 0cd36dc..8c4cbd4 100644 #else #define NOT_TAIL_CALLED #endif -@@ -226,7 +226,9 @@ +@@ -273,7 +273,9 @@ #endif #endif - + -#if defined(__clang__) && __has_attribute(uninitialized) +#if defined(__clang__) +#if defined(__has_attribute) @@ -41,7 +41,7 @@ index 0cd36dc..8c4cbd4 100644 // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for // the specified variable. // Library-wide alternative is -@@ -257,6 +259,8 @@ +@@ -304,6 +306,8 @@ // E.g. platform, bot, benchmark or test name in patch description or next to // the attribute. 
#define STACK_UNINITIALIZED __attribute__((uninitialized)) @@ -50,13 +50,74 @@ index 0cd36dc..8c4cbd4 100644 #else #define STACK_UNINITIALIZED #endif +@@ -365,8 +369,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) { + #endif // defined(__clang_analyzer__) + + // Use nomerge attribute to disable optimization of merging multiple same calls. +-#if defined(__clang__) && __has_attribute(nomerge) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(nomerge) + #define NOMERGE [[clang::nomerge]] ++#endif ++#endif + #else + #define NOMERGE + #endif +@@ -392,8 +400,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) { + // See also: + // https://clang.llvm.org/docs/AttributeReference.html#trivial-abi + // https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html +-#if defined(__clang__) && __has_attribute(trivial_abi) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(trivial_abi) + #define TRIVIAL_ABI [[clang::trivial_abi]] ++#endif ++#endif + #else + #define TRIVIAL_ABI + #endif +@@ -401,8 +413,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) { + // Marks a member function as reinitializing a moved-from variable. 
+ // See also + // https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization +-#if defined(__clang__) && __has_attribute(reinitializes) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(reinitializes) + #define REINITIALIZES_AFTER_MOVE [[clang::reinitializes]] ++#endif ++#endif + #else + #define REINITIALIZES_AFTER_MOVE + #endif + +# TODO(keith): Remove once bazel supports newer NDK versions https://github.com/bazelbuild/bazel/issues/12889 + +diff --git a/base/containers/checked_iterators.h b/base/containers/checked_iterators.h +index b5fe925..31aa81e 100644 +--- a/base/containers/checked_iterators.h ++++ b/base/containers/checked_iterators.h +@@ -237,9 +237,11 @@ using CheckedContiguousConstIterator = CheckedContiguousIterator; + // [3] https://wg21.link/pointer.traits.optmem + namespace std { + ++#ifdef SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR + template + struct __is_cpp17_contiguous_iterator<::gurl_base::CheckedContiguousIterator> + : true_type {}; ++#endif + + template + struct pointer_traits<::gurl_base::CheckedContiguousIterator> { # TODO(dio): Consider to remove the following patch when we have IDN-free optional build for URL # library from the upstream Chromium project. This is tracked in: # https://github.com/envoyproxy/envoy/issues/14743. diff --git a/url/BUILD b/url/BUILD -index f2ec8da..4e2d55b 100644 +index f2ec8da..df69661 100644 --- a/url/BUILD +++ b/url/BUILD @@ -52,3 +52,27 @@ cc_library( diff --git a/bazel/external/kafka_int32.patch b/bazel/external/kafka_int32.patch deleted file mode 100644 index 8b88fe3358211..0000000000000 --- a/bazel/external/kafka_int32.patch +++ /dev/null @@ -1,27 +0,0 @@ ---- DescribeGroupsResponse.json 2020-03-25 16:12:16.373302600 -0400 -+++ DescribeGroupsResponse.json 2020-03-25 16:11:16.184156200 -0400 -@@ -63,7 +63,7 @@ - { "name": "MemberAssignment", "type": "bytes", "versions": "0+", - "about": "The current assignment provided by the group leader." 
} - ]}, -- { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "-2147483648", -+ { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "INT32_MIN", - "about": "32-bit bitfield to represent authorized operations for this group." } - ]} - ] - ---- MetadataResponse.json 2020-03-25 15:53:36.319161000 -0400 -+++ MetadataResponse.json 2020-03-25 15:54:11.510400000 -0400 -@@ -81,10 +81,10 @@ - { "name": "OfflineReplicas", "type": "[]int32", "versions": "5+", "ignorable": true, - "about": "The set of offline replicas of this partition." } - ]}, -- { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648", -+ { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN", - "about": "32-bit bitfield to represent authorized operations for this topic." } - ]}, -- { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648", -+ { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN", - "about": "32-bit bitfield to represent authorized operations for this cluster." 
} - ] - } diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index f8a1079ac93cd..8866c95b53eea 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1266,6 +1266,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ ":quic_core_arena_scoped_ptr_lib", + ":quic_core_connection_context_lib", ":quic_core_time_lib", ], ) @@ -2288,6 +2289,21 @@ envoy_cc_library( deps = [":quic_core_types_lib"], ) +envoy_cc_library( + name = "quic_core_http_capsule_lib", + srcs = ["quiche/quic/core/http/capsule.cc"], + hdrs = ["quiche/quic/core/http/capsule.h"], + copts = quiche_copts, + repository = "@envoy", + deps = [ + ":quic_core_buffer_allocator_lib", + ":quic_core_data_lib", + ":quic_core_http_http_frames_lib", + ":quic_core_types_lib", + ":quic_platform_base", + ], +) + envoy_cc_library( name = "quic_core_http_client_lib", srcs = [ @@ -2386,6 +2402,7 @@ envoy_cc_library( repository = "@envoy", tags = ["nofips"], deps = [ + ":quic_core_http_http_constants_lib", ":quic_core_types_lib", ":quic_platform_base", ":spdy_core_framer_lib", @@ -2429,6 +2446,7 @@ envoy_cc_library( "quiche/quic/core/http/quic_spdy_session.cc", "quiche/quic/core/http/quic_spdy_stream.cc", "quiche/quic/core/http/web_transport_http3.cc", + "quiche/quic/core/http/web_transport_stream_adapter.cc", ], hdrs = [ "quiche/quic/core/http/quic_headers_stream.h", @@ -2439,6 +2457,7 @@ envoy_cc_library( "quiche/quic/core/http/quic_spdy_session.h", "quiche/quic/core/http/quic_spdy_stream.h", "quiche/quic/core/http/web_transport_http3.h", + "quiche/quic/core/http/web_transport_stream_adapter.h", ], copts = quiche_copts, repository = "@envoy", @@ -2448,6 +2467,7 @@ envoy_cc_library( ":quic_core_connection_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_error_codes_lib", + ":quic_core_http_capsule_lib", ":quic_core_http_header_list_lib", ":quic_core_http_http_constants_lib", ":quic_core_http_http_decoder_lib", @@ -2466,7 +2486,6 @@ envoy_cc_library( 
":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_core_web_transport_interface_lib", - ":quic_core_web_transport_stream_adapter", ":quic_platform_base", ":quic_platform_mem_slice_storage", ":spdy_core_framer_lib", @@ -3149,19 +3168,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "quic_core_web_transport_stream_adapter", - srcs = ["quiche/quic/core/web_transport_stream_adapter.cc"], - hdrs = ["quiche/quic/core/web_transport_stream_adapter.h"], - copts = quiche_copts, - repository = "@envoy", - tags = ["nofips"], - deps = [ - ":quic_core_session_lib", - ":quic_core_web_transport_interface_lib", - ], -) - envoy_cc_library( name = "quic_core_server_id_lib", srcs = ["quiche/quic/core/quic_server_id.cc"], diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 2c9b481282cb0..e55355fe8f534 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -1,6 +1,6 @@ load("@rules_cc//cc:defs.bzl", "cc_library") -load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package") -load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make") +load("//bazel:envoy_build_system.bzl", "envoy_cmake", "envoy_package") +load("@rules_foreign_cc//foreign_cc:configure.bzl", "configure_make") licenses(["notice"]) # Apache 2 @@ -20,12 +20,14 @@ configure_make( }), lib_source = "@com_github_gperftools_gperftools//:all", linkopts = ["-lpthread"], - make_commands = ["make install-libLTLIBRARIES install-perftoolsincludeHEADERS"], - static_libraries = select({ + out_static_libs = select({ "//bazel:debug_tcmalloc": ["libtcmalloc_debug.a"], "//conditions:default": ["libtcmalloc_and_profiler.a"], }), tags = ["skip_on_windows"], + targets = [ + "install-libLTLIBRARIES install-perftoolsincludeHEADERS", + ], ) # Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/227 @@ -44,14 +46,14 @@ configure_make( configure_in_place = True, configure_options = ["--disable-ssl --disable-gssapi --disable-lz4-ext --disable-zstd && cp 
Makefile.config src/.. && cp config.h src/.."], lib_source = "@edenhill_librdkafka//:all", - make_commands = [ - "make ARFLAGS='' libs install-subdirs", - ], - static_libraries = [ + out_static_libs = [ "librdkafka.a", "librdkafka++.a", ], tags = ["skip_on_windows"], + targets = [ + "ARFLAGS='' libs install-subdirs", + ], alwayslink = True, ) @@ -66,7 +68,7 @@ cc_library( configure_make( name = "luajit", configure_command = "build.py", - configure_env_vars = select({ + env = select({ # This shouldn't be needed! See # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed @@ -76,18 +78,18 @@ configure_make( "//conditions:default": {}, }), lib_source = "@com_github_luajit_luajit//:all", - make_commands = [], out_include_dir = "include/luajit-2.1", - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), + targets = [], ) configure_make( name = "moonjit", configure_command = "build.py", - configure_env_vars = select({ + env = select({ # This shouldn't be needed! 
See # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed @@ -96,13 +98,12 @@ configure_make( "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", - make_commands = [], out_include_dir = "include/moonjit-2.2", - static_libraries = ["libluajit-5.1.a"], + out_static_libs = ["libluajit-5.1.a"], tags = ["skip_on_windows"], ) -envoy_cmake_external( +envoy_cmake( name = "libsxg", cache_entries = { "CMAKE_BUILD_TYPE": "Release", @@ -112,15 +113,14 @@ envoy_cmake_external( "SXG_WITH_CERT_CHAIN": "off", "RUN_TEST": "off", "CMAKE_INSTALL_LIBDIR": "lib", - "CMAKE_TRY_COMPILE_TARGET_TYPE": "STATIC_LIBRARY", }, lib_source = "@com_github_google_libsxg//:all", - static_libraries = ["libsxg.a"], + out_static_libs = ["libsxg.a"], tags = ["skip_on_windows"], deps = ["@boringssl//:ssl"], ) -envoy_cmake_external( +envoy_cmake( name = "ares", cache_entries = { "CARES_BUILD_TOOLS": "no", @@ -135,17 +135,17 @@ envoy_cmake_external( "//bazel:apple": ["-lresolv"], "//conditions:default": [], }), - postfix_script = select({ - "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", - "//conditions:default": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", - }), - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["cares.lib"], "//conditions:default": ["libcares.a"], }), + postfix_script = select({ + "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", + "//conditions:default": "rm -f $INSTALLDIR/include/ares_dns.h && cp -L 
$EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", + }), ) -envoy_cmake_external( +envoy_cmake( name = "curl", cache_entries = { "BUILD_CURL_EXE": "off", @@ -186,7 +186,7 @@ envoy_cmake_external( defines = ["CURL_STATICLIB"], generate_crosstool_file = True, lib_source = "@com_github_curl//:all", - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["libcurl.lib"], "//conditions:default": ["libcurl.a"], }), @@ -198,7 +198,7 @@ envoy_cmake_external( ], ) -envoy_cmake_external( +envoy_cmake( name = "event", cache_entries = { "EVENT__DISABLE_OPENSSL": "on", @@ -215,7 +215,7 @@ envoy_cmake_external( "_GNU_SOURCE": "on", }, lib_source = "@com_github_libevent_libevent//:all", - static_libraries = select({ + out_static_libs = select({ # macOS organization of libevent is different from Windows/Linux. # Including libevent_core is a requirement on those platforms, but # results in duplicate symbols when built on macOS. @@ -236,7 +236,7 @@ envoy_cmake_external( }), ) -envoy_cmake_external( +envoy_cmake( name = "llvm", cache_entries = { # Disable both: BUILD and INCLUDE, since some of the INCLUDE @@ -267,7 +267,7 @@ envoy_cmake_external( # using -l:libstdc++.a. "CMAKE_CXX_FLAGS": "-lstdc++", }, - env_vars = { + env = { # Workaround for the -DDEBUG flag added in fastbuild on macOS, # which conflicts with DEBUG macro used in LLVM. 
"CFLAGS": "-UDEBUG", @@ -275,7 +275,7 @@ envoy_cmake_external( "ASMFLAGS": "-UDEBUG", }, lib_source = "@org_llvm_llvm//:all", - static_libraries = select({ + out_static_libs = select({ "//conditions:default": [ # Order from llvm-config --libnames asmparser core debuginfodwarf # engine lto mcparser mirparser orcjit passes runtimedyld @@ -336,7 +336,7 @@ envoy_cmake_external( alwayslink = True, ) -envoy_cmake_external( +envoy_cmake( name = "nghttp2", cache_entries = { "ENABLE_LIB_ONLY": "on", @@ -349,13 +349,13 @@ envoy_cmake_external( debug_cache_entries = {"ENABLE_DEBUG": "on"}, defines = ["NGHTTP2_STATICLIB"], lib_source = "@com_github_nghttp2_nghttp2//:all", - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["nghttp2.lib"], "//conditions:default": ["libnghttp2.a"], }), ) -envoy_cmake_external( +envoy_cmake( name = "wamr", cache_entries = { "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm", @@ -368,14 +368,13 @@ envoy_cmake_external( "WAMR_BUILD_TAIL_CALL": "1", }, lib_source = "@com_github_wamr//:all", - static_libraries = ["libvmlib.a"], + out_static_libs = ["libvmlib.a"], tags = ["skip_on_windows"], deps = [":llvm"], ) -envoy_cmake_external( +envoy_cmake( name = "wavm", - binaries = ["wavm"], cache_entries = { "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm", "WAVM_ENABLE_STATIC_LINKING": "on", @@ -385,7 +384,7 @@ envoy_cmake_external( # using -l:libstdc++.a. "CMAKE_CXX_FLAGS": "-lstdc++ -Wno-unused-command-line-argument", }, - env_vars = { + env = { # Workaround for the -DDEBUG flag added in fastbuild on macOS, # which conflicts with DEBUG macro used in LLVM. 
"CFLAGS": "-UDEBUG", @@ -393,7 +392,8 @@ envoy_cmake_external( "ASMFLAGS": "-UDEBUG", }, lib_source = "@com_github_wavm_wavm//:all", - static_libraries = select({ + out_binaries = ["wavm"], + out_static_libs = select({ "//conditions:default": [ "libWAVM.a", "libWAVMUnwind.a", @@ -403,7 +403,7 @@ envoy_cmake_external( deps = [":llvm"], ) -envoy_cmake_external( +envoy_cmake( name = "zlib", cache_entries = { "CMAKE_CXX_COMPILER_FORCED": "on", @@ -436,7 +436,7 @@ envoy_cmake_external( "//bazel:zlib_ng": "@com_github_zlib_ng_zlib_ng//:all", "//conditions:default": "@net_zlib//:all", }), - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["zlib.lib"], "//conditions:default": ["libz.a"], }), diff --git a/bazel/foreign_cc/zlib_ng.patch b/bazel/foreign_cc/zlib_ng.patch index 77b04ef09496c..b4b73279da3f6 100644 --- a/bazel/foreign_cc/zlib_ng.patch +++ b/bazel/foreign_cc/zlib_ng.patch @@ -1,12 +1,13 @@ + # Add support for compiling to WebAssembly using Emscripten. 
# https://github.com/zlib-ng/zlib-ng/pull/794 diff --git a/cmake/detect-arch.c b/cmake/detect-arch.c -index 5715535..2137691 100644 + --- a/cmake/detect-arch.c +++ b/cmake/detect-arch.c -@@ -93,6 +93,10 @@ - #elif defined(__THW_RS6000) - #error archfound rs6000 +@@ -101,6 +101,10 @@ + #error archfound riscv32 + #endif +// Emscripten (WebAssembly) +#elif defined(__EMSCRIPTEN__) @@ -16,16 +17,16 @@ index 5715535..2137691 100644 #else #error archfound unrecognized diff --git a/cmake/detect-arch.cmake b/cmake/detect-arch.cmake -index b80d666..c6cc214 100644 + --- a/cmake/detect-arch.cmake +++ b/cmake/detect-arch.cmake -@@ -85,6 +85,9 @@ elseif("${ARCH}" MATCHES "parisc") +@@ -85,6 +85,9 @@ elseif("${ARCH}" MATCHES "rs6000") set(BASEARCH "rs6000") set(BASEARCH_RS6000_FOUND TRUE) +elseif("${ARCH}" MATCHES "wasm32") + set(BASEARCH "wasm32") + set(BASEARCH_WASM32_FOUND TRUE) - else() - set(BASEARCH "x86") - set(BASEARCH_X86_FOUND TRUE) + elseif("${ARCH}" MATCHES "riscv(32|64)") + set(BASEARCH "riscv") + set(BASEARCH_RISCV_FOUND TRUE) diff --git a/bazel/protobuf.patch b/bazel/protobuf.patch index e786c7ebe1469..a6318ce8e49ff 100644 --- a/bazel/protobuf.patch +++ b/bazel/protobuf.patch @@ -4,15 +4,13 @@ new file mode 100644 index 0000000000..b66101a39a --- /dev/null +++ b/third_party/BUILD -@@ -0,0 +1 @@ +@@ -0,0 +1,1 @@ +exports_files(["six.BUILD", "zlib.BUILD"]) - -# patching for zlib binding diff --git a/BUILD b/BUILD -index efc3d8e7f..746ad4851 100644 +index 7de87f884..3f0fd5362 100644 --- a/BUILD +++ b/BUILD -@@ -24,7 +24,7 @@ config_setting( +@@ -19,7 +19,7 @@ exports_files(["LICENSE"]) # ZLIB configuration ################################################################################ @@ -22,13 +20,13 @@ index efc3d8e7f..746ad4851 100644 ################################################################################ # Protobuf Runtime Library diff --git a/python/google/protobuf/__init__.py b/python/google/protobuf/__init__.py -index 97ac28028..8b7585d9d 100644 
+index cb4740412..91fe69ce5 100644 --- a/python/google/protobuf/__init__.py +++ b/python/google/protobuf/__init__.py @@ -31,3 +31,9 @@ # Copyright 2007 Google Inc. All Rights Reserved. - __version__ = '3.16.0' + __version__ = '3.18.0' + +if __name__ != '__main__': + try: diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index ae04a8918212c..ce8283d3777ee 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -52,6 +52,54 @@ _default_envoy_build_config = repository_rule( }, ) +def _envoy_repo_impl(repository_ctx): + """This provides information about the Envoy repository + + You can access the current version and path to the repository in .bzl/BUILD + files as follows: + + ```starlark + load("@envoy_repo//:version.bzl", "VERSION") + ``` + + `VERSION` can be used to derive version-specific rules and can be passed + to the rules. + + The `VERSION` and also the local `PATH` to the repo can be accessed in + python libraries/binaries. By adding `@envoy_repo` to `deps` they become + importable through the `envoy_repo` namespace. + + As the `PATH` is local to the machine, it is generally only useful for + jobs that will run locally. + + This can be useful for example, for tooling that needs to check the + repository, or to run bazel queries that cannot be run within the + constraints of a `genquery`. 
+ + """ + repo_path = repository_ctx.path(repository_ctx.attr.envoy_root).dirname + version = repository_ctx.read(repo_path.get_child("VERSION")).strip() + repository_ctx.file("version.bzl", "VERSION = '%s'" % version) + repository_ctx.file("__init__.py", "PATH = '%s'\nVERSION = '%s'" % (repo_path, version)) + repository_ctx.file("WORKSPACE", "") + repository_ctx.file("BUILD", """ +load("@rules_python//python:defs.bzl", "py_library") + +py_library(name = "envoy_repo", srcs = ["__init__.py"], visibility = ["//visibility:public"]) + +""") + +_envoy_repo = repository_rule( + implementation = _envoy_repo_impl, + attrs = { + "envoy_root": attr.label(default = "@envoy//:BUILD"), + }, +) + +def envoy_repo(): + if "envoy_repo" not in native.existing_rules().keys(): + _envoy_repo(name = "envoy_repo") + # Python dependencies. def _python_deps(): # TODO(htuch): convert these to pip3_import. @@ -100,6 +148,9 @@ def _rust_deps(): external_http_archive("rules_rust") def envoy_dependencies(skip_targets = []): + # Add a binding for repository variables. + envoy_repo() + # Setup Envoy developer tools. 
envoy_dev_binding() @@ -139,6 +190,7 @@ def envoy_dependencies(skip_targets = []): _com_github_google_tcmalloc() _com_github_gperftools_gperftools() _com_github_grpc_grpc() + _com_github_intel_ipp_crypto_crypto_mb() _com_github_jbeder_yaml_cpp() _com_github_libevent_libevent() _com_github_luajit_luajit() @@ -175,7 +227,11 @@ def envoy_dependencies(skip_targets = []): external_http_archive("com_github_google_flatbuffers") external_http_archive("bazel_toolchains") external_http_archive("bazel_compdb") - external_http_archive("envoy_build_tools") + external_http_archive( + name = "envoy_build_tools", + patch_args = ["-p1"], + patches = ["@envoy//bazel/external:envoy_build_tools.patch"], + ) external_http_archive("rules_cc") external_http_archive("rules_pkg") @@ -325,6 +381,12 @@ def _com_github_google_libsxg(): actual = "@envoy//bazel/foreign_cc:libsxg", ) +def _com_github_intel_ipp_crypto_crypto_mb(): + external_http_archive( + name = "com_github_intel_ipp_crypto_crypto_mb", + build_file_content = BUILD_ALL_CONTENT, + ) + def _com_github_jbeder_yaml_cpp(): external_http_archive( name = "com_github_jbeder_yaml_cpp", @@ -1014,7 +1076,6 @@ filegroup( external_http_archive( name = "kafka_source", build_file_content = KAFKASOURCE_BUILD_CONTENT, - patches = ["@envoy//bazel/external:kafka_int32.patch"], ) # This archive provides Kafka C/CPP client used by mesh filter to communicate with upstream diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index e6aa827110114..22849d7fd3c27 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -39,21 +39,21 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Apple Rules for Bazel", project_desc = "Bazel rules for Apple platforms", project_url = "https://github.com/bazelbuild/rules_apple", - version = "0.31.2", - sha256 = "c84962b64d9ae4472adfb01ec2cf1aa73cb2ee8308242add55fa7cc38602d882", + version = "0.31.3", + sha256 = 
"0052d452af7742c8f3a4e0929763388a66403de363775db7e90adecb2ba4944b", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"], - release_date = "2021-05-07", + release_date = "2021-08-08", use_category = ["build"], ), rules_fuzzing = dict( project_name = "Fuzzing Rules for Bazel", project_desc = "Bazel rules for fuzz tests", project_url = "https://github.com/bazelbuild/rules_fuzzing", - version = "0.1.3", - sha256 = "ce99c277c4e9e21f77222757936bf7ffb8823911497db84bdd57a796588fcf01", + version = "0.2.0", + sha256 = "9b688a77b930e1842312d37b00fbb796b96323a2eb8362b2cfb68e7d6e74f860", strip_prefix = "rules_fuzzing-{version}", urls = ["https://github.com/bazelbuild/rules_fuzzing/archive/v{version}.tar.gz"], - release_date = "2021-04-01", + release_date = "2021-07-12", use_category = ["test_only"], implied_untracked_deps = [ # This is a repository rule generated to define an OSS-Fuzz fuzzing @@ -65,11 +65,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "envoy-build-tools", project_desc = "Common build tools shared by the Envoy/UDPA ecosystem", project_url = "https://github.com/envoyproxy/envoy-build-tools", - version = "a955a00bed5f35777a83899ee680f8530eee4718", - sha256 = "b0830dc6fc1e3a095c5d817ca768c89c407bdd71894e1641daf500d28cb269da", + version = "55a7bbe700586729bd38231a9a6f3dcd1ff85e7d", + sha256 = "11893be9f0334a7e12ffc04b3b034dffe0bb5516d36654011532136c7929ae27", strip_prefix = "envoy-build-tools-{version}", urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"], - release_date = "2021-05-25", + release_date = "2021-09-28", use_category = ["build"], ), boringssl = dict( @@ -141,12 +141,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "xxHash", project_desc = "Extremely fast hash algorithm", project_url = "https://github.com/Cyan4973/xxHash", - version = "0.7.3", - sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", + version = "0.8.0", + sha256 
= "7054c3ebd169c97b64a92d7b994ab63c70dd53a06974f1f630ab782c28db0f4f", strip_prefix = "xxHash-{version}", urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2020-03-05", + release_date = "2020-07-27", cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( @@ -175,6 +175,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"], release_date = "2011-04-16", use_category = ["other"], + cpe = "cpe:2.3:a:tclap_project:tclap:*", ), com_github_fmtlib_fmt = dict( project_name = "fmt", @@ -240,11 +241,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "gperftools", project_desc = "tcmalloc and profiling libraries", project_url = "https://github.com/gperftools/gperftools", - version = "2.8", - sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", + version = "2.9.1", + sha256 = "ea566e528605befb830671e359118c2da718f721c27225cbbc93858c7520fee3", strip_prefix = "gperftools-{version}", urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"], - release_date = "2020-07-06", + release_date = "2021-03-03", use_category = ["dataplane_core", "controlplane"], cpe = "cpe:2.3:a:gperftools_project:gperftools:*", ), @@ -260,6 +261,19 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2021-06-07", cpe = "cpe:2.3:a:grpc:grpc:*", ), + com_github_intel_ipp_crypto_crypto_mb = dict( + project_name = "libipp-crypto", + project_desc = "Intel® Integrated Performance Primitives Cryptography", + project_url = "https://github.com/intel/ipp-crypto", + version = "2021.4", + sha256 = "23e250dcf281aa00d186be8dc4e34fa8fc5c95a0895694cd00b33f18af5d60c7", + strip_prefix = "ipp-crypto-ippcp_{version}", + urls = ["https://github.com/intel/ipp-crypto/archive/ippcp_{version}.tar.gz"], + release_date = "2021-10-01", + use_category = ["dataplane_ext"], + extensions 
= ["envoy.tls.key_providers.cryptomb"], + cpe = "cpe:2.3:a:intel:cryptography_for_intel_integrated_performance_primitives:*", + ), com_github_luajit_luajit = dict( project_name = "LuaJIT", project_desc = "Just-In-Time compiler for Lua", @@ -292,12 +306,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Nghttp2", project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in Cimplementation of HTTP/2 and its header compression algorithm HPACK in C", project_url = "https://nghttp2.org", - version = "1.42.0", - sha256 = "884d18a0158908125d58b1b61d475c0325e5a004e3d61a56b5fcc55d5f4b7af5", + version = "1.45.1", + sha256 = "2379ebeff7b02e14b9a414551d73540ddce5442bbecda2748417e8505916f3e7", strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - release_date = "2020-11-23", + release_date = "2021-09-21", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( @@ -342,7 +356,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( use_category = ["observability_ext"], extensions = ["envoy.tracers.skywalking"], release_date = "2021-06-07", - cpe = "N/A", + cpe = "cpe:2.3:a:apache:skywalking:*", ), com_github_skyapm_cpp2sky = dict( project_name = "cpp2sky", @@ -441,12 +455,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "zlib-ng", project_desc = "zlib fork (higher performance)", project_url = "https://github.com/zlib-ng/zlib-ng", - version = "b802a303ce8b6c86fbe3f93d59e0a82333768c0c", - sha256 = "e051eade607ecbbfa2c7ed3087fe53e5d3a58325375e1e28209594138e4aa93d", + version = "2.0.5", + sha256 = "eca3fe72aea7036c31d00ca120493923c4d5b99fe02e6d3322f7c88dbdcd0085", strip_prefix = "zlib-ng-{version}", urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - release_date = "2020-10-18", + release_date = "2021-06-25", cpe = "N/A", ), 
com_github_jbeder_yaml_cpp = dict( @@ -534,15 +548,15 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "nlohmann JSON", project_desc = "Fast JSON parser/generator for C++", project_url = "https://nlohmann.github.io/json", - version = "3.9.1", - sha256 = "4cf0df69731494668bdd6460ed8cb269b68de9c19ad8c27abc24cd72605b2d5b", + version = "3.10.2", + sha256 = "081ed0f9f89805c2d96335c3acfa993b39a0a5b4b4cef7edb68dd2210a13458c", strip_prefix = "json-{version}", urls = ["https://github.com/nlohmann/json/archive/v{version}.tar.gz"], # This will be a replacement for rapidJSON used in extensions and may also be a fast # replacement for protobuf JSON. use_category = ["controlplane", "dataplane_core"], - release_date = "2020-08-06", - cpe = "cpe:2.3:a:json_project:json:*", + release_date = "2021-08-26", + cpe = "cpe:2.3:a:json-for-modern-cpp_project:json-for-modern-cpp:*", ), # This is an external dependency needed while running the # envoy docker image. A bazel target has been created since @@ -606,30 +620,31 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"], release_date = "2020-09-10", use_category = ["test_only"], + cpe = "cpe:2.3:a:google:google_test:*", ), com_google_protobuf = dict( project_name = "Protocol Buffers", project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data", project_url = "https://developers.google.com/protocol-buffers", - version = "3.16.0", - sha256 = "d7371dc2d46fddac1af8cb27c0394554b068768fc79ecaf5be1a1863e8ff3392", + version = "3.18.0", + sha256 = "52b6160ae9266630adb5e96a9fc645215336371a740e87d411bfb63ea2f268a0", strip_prefix = "protobuf-{version}", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2021-05-07", + release_date = "2021-09-15", cpe = "cpe:2.3:a:google:protobuf:*", ), grpc_httpjson_transcoding = 
dict( project_name = "grpc-httpjson-transcoding", project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC", project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding", - version = "f1591a41318104b7e27a26be12f502b106a16256", - sha256 = "440baf465096ce1a7152c6d1090a70e871e5ca93b23c6cf9f8cd79f028bf5bb8", + version = "3127eeaf889d48b5d2cd870fd910f1ae3e7abca4", + sha256 = "f98da3fe9b2539c9fc9b3884e01baa8d2e19ed016bc5f41bed2998781c96ac63", strip_prefix = "grpc-httpjson-transcoding-{version}", urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.grpc_json_transcoder"], - release_date = "2021-05-08", + release_date = "2021-09-22", cpe = "N/A", ), io_bazel_rules_go = dict( @@ -640,7 +655,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b", urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"], use_category = ["build", "api"], - release_date = "2021-03-17", + release_date = "2021-03-18", implied_untracked_deps = [ "com_github_golang_protobuf", "io_bazel_rules_nogo", @@ -652,45 +667,42 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "C++ rules for Bazel", project_desc = "Bazel rules for the C++ language", project_url = "https://github.com/bazelbuild/rules_cc", - # TODO(lizan): pin to a point releases when there's a released version. 
- version = "b1c40e1de81913a3c40e5948f78719c28152486d", - sha256 = "71d037168733f26d2a9648ad066ee8da4a34a13f51d24843a42efa6b65c2420f", - strip_prefix = "rules_cc-{version}", - urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"], - release_date = "2020-11-11", + version = "0.0.1", + sha256 = "4dccbfd22c0def164c8f47458bd50e0c7148f3d92002cdb459c2a96a68498241", + urls = ["https://github.com/bazelbuild/rules_cc/releases/download/{version}/rules_cc-{version}.tar.gz"], + release_date = "2021-10-07", use_category = ["build"], ), rules_foreign_cc = dict( project_name = "Rules for using foreign build systems in Bazel", project_desc = "Rules for using foreign build systems in Bazel", project_url = "https://github.com/bazelbuild/rules_foreign_cc", - version = "d54c78ab86b40770ee19f0949db9d74a831ab9f0", - sha256 = "e7446144277c9578141821fc91c55a61df7ae01bda890902f7286f5fd2f6ae46", + version = "6c0c2af3d599f4c23117a5e65e811ebab75bb151", + sha256 = "8a438371fa742bbbae8b6d995905280053098c5aac28cd434240cd75bc2415a5", strip_prefix = "rules_foreign_cc-{version}", urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"], - release_date = "2020-10-26", - use_category = ["build"], + release_date = "2021-09-22", + use_category = ["build", "dataplane_core", "controlplane"], ), rules_python = dict( project_name = "Python rules for Bazel", project_desc = "Bazel rules for the Python language", project_url = "https://github.com/bazelbuild/rules_python", - version = "9f597623ccfbe430b0d81c82498e33b80b7aec88", - sha256 = "8d61fed6974f1e69e09243ca78c9ecf82f50fa3de64bb5df6b0b9061f9c9639b", - release_date = "2021-09-07", - strip_prefix = "rules_python-{version}", - urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"], + version = "0.4.0", + sha256 = "954aa89b491be4a083304a2cb838019c8b8c3720a7abb9c4cb81ac7a24230cea", + release_date = "2021-09-12", + urls = 
["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"], use_category = ["build"], ), rules_pkg = dict( project_name = "Packaging rules for Bazel", project_desc = "Bazel rules for the packaging distributions", project_url = "https://github.com/bazelbuild/rules_pkg", - version = "0.4.0", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + version = "0.5.1", + sha256 = "a89e203d3cf264e564fcb96b6e06dd70bc0557356eb48400ce4b5d97c2c3720d", urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/{version}/rules_pkg-{version}.tar.gz"], use_category = ["build"], - release_date = "2021-03-03", + release_date = "2021-08-18", ), six = dict( project_name = "Six", @@ -722,11 +734,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Webassembly Micro Runtime", project_desc = "A standalone runtime with a small footprint for WebAssembly", project_url = "https://github.com/bytecodealliance/wasm-micro-runtime", - version = "b554a9d05d89bb4ef28068b4ae4d0ee6c99bc9db", - sha256 = "de6b68118c5d4b0d37c9049fa08fae6a850304522ec307f087f0eca4ad8fff57", + version = "WAMR-08-10-2021", + sha256 = "4016f8330b2ed4fb5d9541ecd5bc4298f324097803a1f270fdbe691389cedfd9", strip_prefix = "wasm-micro-runtime-{version}", urls = ["https://github.com/bytecodealliance/wasm-micro-runtime/archive/{version}.tar.gz"], - release_date = "2021-07-06", + release_date = "2021-08-10", use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.wamr"], cpe = "N/A", @@ -755,7 +767,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2021-04-05", use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.wasmtime"], - cpe = "N/A", + cpe = "cpe:2.3:a:bytecodealliance:wasmtime:*", ), com_github_wasm_c_api = dict( project_name = "wasm-c-api", @@ -790,8 +802,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "curl", project_desc = "Library for transferring data with URLs", project_url = "https://curl.haxx.se", - 
version = "7.77.0", - sha256 = "b0a3428acb60fa59044c4d0baae4e4fc09ae9af1d8a3aa84b2e3fbcd99841f77", + version = "7.79.1", + sha256 = "370b11201349816287fb0ccc995e420277fbfcaf76206e309b3f60f0eda090c2", strip_prefix = "curl-{version}", urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"], use_category = ["dataplane_ext", "observability_ext"], @@ -801,7 +813,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.grpc_credentials.aws_iam", "envoy.tracers.opencensus", ], - release_date = "2021-05-26", + release_date = "2021-09-22", cpe = "cpe:2.3:a:haxx:libcurl:*", ), com_googlesource_chromium_v8 = dict( @@ -822,36 +834,35 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "8d5eb27ee2e3f009f7180e8ace0ff97830d9c3e9", - sha256 = "88cc71556b96bbec953a716a12c26f88b8af4d5e9a83cf3ec38aba4caed6bf52", - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/{version}.tar.gz + version = "72442c9337bac2fa6865e223e56fe9aac90d84a8", + sha256 = "ababed9c36cb16e43e7f1d508ae4a6ea89831752944fded3fb4fd2b3bead0bad", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["dataplane_core"], - release_date = "2021-08-31", + release_date = "2021-10-06", cpe = "N/A", ), com_googlesource_googleurl = dict( project_name = "Chrome URL parsing library", project_desc = "Chrome URL parsing library", project_url = "https://quiche.googlesource.com/googleurl", - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz. 
- version = "ef0d23689e240e6c8de4c3a5296b209128c87373", - sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176", + # Static snapshot of https://quiche.googlesource.com/googleurl/+archive/561705e0066ff11e6cb97b8092f1547835beeb92.tar.gz. + version = "561705e0066ff11e6cb97b8092f1547835beeb92", + sha256 = "7ce00768fea1fa4c7bf658942f13e41c9ba30e9cff931a6cda2f9fd02289f673", urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], extensions = [], - release_date = "2020-07-30", + release_date = "2021-08-31", cpe = "N/A", ), com_google_cel_cpp = dict( project_name = "Common Expression Language (CEL) C++ library", project_desc = "Common Expression Language (CEL) C++ library", project_url = "https://opensource.google/projects/cel", - version = "0.6.1", - sha256 = "d001494f1aa7d88172af944233fac3d7f83d9183d66590aa787aa2a35aab0440", + version = "89d81b2d2c24943b6e4fd5e8fc321099c2ab6d3f", + sha256 = "1408ef31e77ed847b420ff108da9652ad1702401008f2a75b671fba860a9707d", strip_prefix = "cel-cpp-{version}", - urls = ["https://github.com/google/cel-cpp/archive/v{version}.tar.gz"], + urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -862,18 +873,19 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.rbac", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", + "envoy.rbac.matchers.upstream_ip_port", ], - release_date = "2021-06-28", + release_date = "2021-10-07", cpe = "N/A", ), com_github_google_flatbuffers = dict( project_name = "FlatBuffers", project_desc = "Cross platform serialization library architected for maximum memory efficiency", project_url = "https://github.com/google/flatbuffers", - version = "a83caf5910644ba1c421c002ef68e42f21c15f9f", - sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a", + version = "2.0.0", + sha256 = 
"9ddb9031798f4f8754d00fca2f1a68ecf9d0f83dfac7239af1311e4fd9a565c4", strip_prefix = "flatbuffers-{version}", - urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"], + urls = ["https://github.com/google/flatbuffers/archive/v{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -884,20 +896,21 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.rbac", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", + "envoy.rbac.matchers.upstream_ip_port", ], - release_date = "2020-04-02", - cpe = "N/A", + release_date = "2021-05-10", + cpe = "cpe:2.3:a:google:flatbuffers:*", ), com_googlesource_code_re2 = dict( project_name = "RE2", project_desc = "RE2, a regular expression library", project_url = "https://github.com/google/re2", - version = "2020-07-06", - sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", + version = "2021-09-01", + sha256 = "42a2e1d56b5de252f5d418dc1cc0848e9e52ca22b056453988b18c6195ec7f8d", strip_prefix = "re2-{version}", urls = ["https://github.com/google/re2/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - release_date = "2020-07-06", + release_date = "2021-09-01", cpe = "N/A", ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but @@ -907,13 +920,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "compiler-rt", project_desc = "LLVM compiler runtime library", project_url = "https://compiler-rt.llvm.org", - version = "11.0.1", - sha256 = "087be3f1116e861cd969c9b0b0903c27028b52eaf45157276f50a9c2500687fc", + version = "12.0.1", + sha256 = "b4c8d5f2a802332987c1c0a95b5afb35b1a66a96fe44add4e4ed4792c4cba0a4", # Only allow peeking at fuzzer related files for now. 
strip_prefix = "compiler-rt-{version}.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"], - release_date = "2021-01-06", + release_date = "2021-07-09", use_category = ["test_only"], + cpe = "cpe:2.3:a:llvm:compiler-rt:*", ), upb = dict( project_name = "upb", @@ -931,13 +945,13 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Kafka (source)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", - version = "2.4.1", - sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd", + version = "2.8.1", + sha256 = "c3fd89257e056e11b5e1b09d4bbd8332ce5abfdfa7c7a5bb6a5cfe9860fcc688", strip_prefix = "kafka-{version}/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/{version}.zip"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.network.kafka_broker", "envoy.filters.network.kafka_mesh"], - release_date = "2020-03-03", + release_date = "2021-09-14", cpe = "cpe:2.3:a:apache:kafka:*", ), edenhill_librdkafka = dict( @@ -957,11 +971,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Kafka (server binary)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", - version = "2.4.1", - sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a", - strip_prefix = "kafka_2.12-{version}", - urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.12-{version}.tgz"], - release_date = "2020-03-12", + version = "2.8.1", + sha256 = "4888b03e3b27dd94f2d830ce3bae9d7d98b0ccee3a5d30c919ccb60e0fa1f139", + strip_prefix = "kafka_2.13-{version}", + urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.13-{version}.tgz"], + release_date = "2021-09-14", use_category = ["test_only"], ), kafka_python_client = dict( diff --git a/bazel/utils.bzl b/bazel/utils.bzl new file mode 100644 index 
0000000000000..0961f00eb446a --- /dev/null +++ b/bazel/utils.bzl @@ -0,0 +1,18 @@ +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +def json_data( + name, + data, + visibility = ["//visibility:public"], + **kwargs): + """Write a bazel object to a file + + The provided `data` object should be json serializable. + """ + write_file( + name = name, + out = "%s.json" % name, + content = json.encode(data).split("\n"), + visibility = visibility, + **kwargs + ) diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows index 6f9514569c1c4..edeff92dd4ebd 100644 --- a/ci/Dockerfile-envoy-windows +++ b/ci/Dockerfile-envoy-windows @@ -4,7 +4,8 @@ ARG BUILD_TAG=ltsc2019 FROM $BUILD_OS:$BUILD_TAG USER ContainerAdministrator -RUN net user /add "EnvoyUser" +RUN net accounts /MaxPWAge:unlimited +RUN net user /add "EnvoyUser" /expires:never RUN net localgroup "Network Configuration Operators" "EnvoyUser" /add RUN mkdir "C:\\Program\ Files\\envoy" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index d507d36993da0..4334c2304b34c 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -135,7 +135,7 @@ function bazel_binary_build() { fi # Build su-exec utility - bazel build external:su-exec + bazel build "${BAZEL_BUILD_OPTIONS[@]}" external:su-exec cp_binary_for_image_build "${BINARY_TYPE}" "${COMPILE_TYPE}" "${EXE_NAME}" } @@ -150,7 +150,7 @@ function bazel_contrib_binary_build() { function run_process_test_result() { if [[ -z "$CI_SKIP_PROCESS_TEST_RESULTS" ]] && [[ $(find "$TEST_TMPDIR" -name "*_attempt.xml" 2> /dev/null) ]]; then echo "running flaky test reporting script" - "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //ci/flaky_test:process_xml "$CI_TARGET" else echo "no flaky test results found" fi @@ -368,24 +368,19 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then "${ENVOY_SRCDIR}"/tools/api/validate_structure.py echo "Validate Golang protobuf generation..." 
"${ENVOY_SRCDIR}"/tools/api/generate_go_protobuf.py - echo "Testing API and API Boosting..." - bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ - @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/... + echo "Testing API..." + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//test/... @envoy_api//tools/... \ + @envoy_api//tools:tap2pcap_test echo "Building API..." - bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/... - echo "Testing API boosting (golden C++ tests)..." - # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 "${ENVOY_SRCDIR}"/tools/api_boost/api_boost_test.py + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//envoy/... exit 0 elif [[ "$CI_TARGET" == "bazel.api_compat" ]]; then - echo "Building buf..." - bazel build @com_github_bufbuild_buf//:buf - BUF_PATH=$(realpath "bazel-source/external/com_github_bufbuild_buf/bin/buf") echo "Checking API for breaking changes to protobuf backwards compatibility..." BASE_BRANCH_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) COMMIT_TITLE=$(git log -n 1 --pretty='format:%C(auto)%h (%s, %ad)' "${BASE_BRANCH_REF}") echo -e "\tUsing base commit ${COMMIT_TITLE}" - "${ENVOY_SRCDIR}"/tools/api_proto_breaking_change_detector/detector_ci.sh "${BUF_PATH}" "${BASE_BRANCH_REF}" + # BAZEL_BUILD_OPTIONS needed for setting the repository_cache param. 
+ bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/api_proto_breaking_change_detector:detector_ci "${BASE_BRANCH_REF}" exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain @@ -462,7 +457,7 @@ elif [[ "$CI_TARGET" == "deps" ]]; then "${ENVOY_SRCDIR}"/ci/check_repository_locations.sh # Run pip requirements tests - bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pip_check "${ENVOY_SRCDIR}" + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pip_check exit 0 elif [[ "$CI_TARGET" == "cve_scan" ]]; then @@ -481,9 +476,6 @@ elif [[ "$CI_TARGET" == "tooling" ]]; then echo "Run protoxform test" BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/protoxform/protoxform_test.sh - echo "Run merge active shadow test" - bazel test "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:merge_active_shadow_test - echo "check_format_test..." "${ENVOY_SRCDIR}"/tools/code_format/check_format_test_helper.sh --log=WARN diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index 9eb97f75afe62..77cef1e83ecf0 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -38,8 +38,13 @@ build_args() { TYPE=$1 FILE_SUFFIX="${TYPE/-debug/}" FILE_SUFFIX="${FILE_SUFFIX/-contrib/}" + FILE_SUFFIX="${FILE_SUFFIX/-ltsc2022/}" printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}" + if [[ "${TYPE}" == *-windows* ]]; then + printf ' --build-arg BUILD_OS=%s --build-arg BUILD_TAG=%s' "${WINDOWS_IMAGE_BASE}" "${WINDOWS_IMAGE_TAG}" + fi + if [[ "${TYPE}" == *-contrib* ]]; then printf ' --build-arg ENVOY_BINARY=envoy-contrib' fi @@ -103,7 +108,7 @@ push_images() { PLATFORM="$(build_platforms "${TYPE}")" # docker buildx doesn't do push with default builder docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . 
--push || \ - docker push "${BUILD_TAG}" + docker push "${BUILD_TAG}" } MAIN_BRANCH="refs/heads/main" @@ -125,7 +130,7 @@ DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" if is_windows; then - BUILD_TYPES=("-windows") + BUILD_TYPES=("-${WINDOWS_BUILD_TYPE}") # BuildKit is not available for Windows images, use standard build command BUILD_COMMAND=("build") else diff --git a/ci/flaky_test/BUILD b/ci/flaky_test/BUILD new file mode 100644 index 0000000000000..7cbc182ec9e0a --- /dev/null +++ b/ci/flaky_test/BUILD @@ -0,0 +1,17 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("@base_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +py_binary( + name = "process_xml", + srcs = ["process_xml.py"], + deps = [ + "@envoy_repo", + requirement("pygithub"), + requirement("slackclient"), + ], +) diff --git a/ci/flaky_test/process_xml.py b/ci/flaky_test/process_xml.py index 9eae5129275c8..943d710c4dbb3 100755 --- a/ci/flaky_test/process_xml.py +++ b/ci/flaky_test/process_xml.py @@ -1,17 +1,26 @@ #!/usr/bin/env python3 -import subprocess import os +import ssl +import subprocess +import sys +from typing import Iterable import xml.etree.ElementTree as ET + import slack from slack.errors import SlackApiError -import sys -import ssl + +import envoy_repo well_known_timeouts = [60, 300, 900, 3600] section_delimiter = "---------------------------------------------------------------------------------------------------\n" +def run_in_repo(command: Iterable) -> str: + """Run a command in the repo root""" + return subprocess.check_output(command, encoding="utf-8", cwd=envoy_repo.PATH) + + # Returns a boolean indicating if a test passed. 
def did_test_pass(file): tree = ET.parse(file) @@ -192,7 +201,7 @@ def get_git_info(CI_TARGET): elif os.getenv('BUILD_REASON'): ret += "Build reason: {}\n".format(os.environ['BUILD_REASON']) - output = subprocess.check_output(['git', 'log', '--format=%H', '-n', '1'], encoding='utf-8') + output = run_in_repo(['git', 'log', '--format=%H', '-n', '1']) ret += "Commmit: {}/commit/{}".format(os.environ['REPO_URI'], output) build_id = os.environ['BUILD_URI'].split('/')[-1] @@ -200,23 +209,23 @@ def get_git_info(CI_TARGET): ret += "\n" - remotes = subprocess.check_output(['git', 'remote'], encoding='utf-8').splitlines() + remotes = run_in_repo(['git', 'remote']).splitlines() if ("origin" in remotes): - output = subprocess.check_output(['git', 'remote', 'get-url', 'origin'], encoding='utf-8') + output = run_in_repo(['git', 'remote', 'get-url', 'origin']) ret += "Origin: {}".format(output.replace('.git', '')) if ("upstream" in remotes): - output = subprocess.check_output(['git', 'remote', 'get-url', 'upstream'], encoding='utf-8') + output = run_in_repo(['git', 'remote', 'get-url', 'upstream']) ret += "Upstream: {}".format(output.replace('.git', '')) - output = subprocess.check_output(['git', 'describe', '--all', '--always'], encoding='utf-8') + output = run_in_repo(['git', 'describe', '--all', '--always']) ret += "Latest ref: {}".format(output) ret += "\n" ret += "Last commit:\n" - output = subprocess.check_output(['git', 'show', '-s'], encoding='utf-8') + output = run_in_repo(['git', 'show', '-s']) for line in output.splitlines(): ret += "\t" + line + "\n" @@ -225,7 +234,7 @@ def get_git_info(CI_TARGET): return ret -if __name__ == "__main__": +def main(): CI_TARGET = "" if len(sys.argv) == 2: CI_TARGET = sys.argv[1] @@ -286,3 +295,10 @@ def get_git_info(CI_TARGET): print('No flaky tests found.\n') os.remove(os.environ["TMP_OUTPUT_PROCESS_XML"]) + + +if __name__ == "__main__": + if os.getenv("ENVOY_BUILD_ARCH") == "aarch64": + os.environ["MULTIDICT_NO_EXTENSIONS"] = 1 + 
os.environ["YARL_NO_EXTENSIONS"] = 1 + main() diff --git a/ci/flaky_test/requirements.txt b/ci/flaky_test/requirements.txt deleted file mode 100644 index 1e9f11f4cfa12..0000000000000 --- a/ci/flaky_test/requirements.txt +++ /dev/null @@ -1,138 +0,0 @@ -aiohttp==3.7.4.post0 \ - --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ - --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ - --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 \ - --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ - --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ - --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ - --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ - --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ - --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ - --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ - --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ - --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ - --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ - --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ - --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ - --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ - --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ - --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ - --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ - --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ - 
--hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ - --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ - --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ - --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ - --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ - --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ - --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ - --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ - --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ - --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ - --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ - --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ - --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ - --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ - --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ - --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ - --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf -async-timeout==3.0.1 \ - --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ - --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 -attrs==21.2.0 \ - --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ - --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb -chardet==4.0.0 \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa -idna==3.1 \ - 
--hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \ - --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1 -idna_ssl==1.1.0 \ - --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c -multidict==5.1.0 \ - --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ - --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ - --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ - --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ - --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ - --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ - --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ - --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ - --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ - --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ - --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ - --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ - --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ - --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ - --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ - --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ - --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ - --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ - --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ - --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ - 
--hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ - --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ - --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ - --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ - --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ - --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ - --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ - --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ - --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ - --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ - --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ - --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ - --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ - --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ - --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \ - --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ - --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 -slackclient==2.9.3 \ - --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 \ - --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965 -typing-extensions==3.10.0.2 \ - --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ - --hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 \ - --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e -wheel==0.37.0 \ - --hash=sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd \ - 
--hash=sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad -yarl==1.6.3 \ - --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ - --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ - --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ - --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ - --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ - --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ - --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ - --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ - --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ - --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ - --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ - --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ - --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ - --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ - --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ - --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ - --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ - --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ - --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ - --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ - --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \ - --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ - 
--hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ - --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ - --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ - --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ - --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ - --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ - --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ - --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ - --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ - --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ - --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ - --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ - --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ - --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ - --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 diff --git a/ci/flaky_test/run_process_xml.sh b/ci/flaky_test/run_process_xml.sh deleted file mode 100755 index 38496128bb913..0000000000000 --- a/ci/flaky_test/run_process_xml.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -export ENVOY_SRCDIR=${ENVOY_SRCDIR:-.} - -# shellcheck source=tools/shell_utils.sh -. 
"${ENVOY_SRCDIR}"/tools/shell_utils.sh - -if [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then - export MULTIDICT_NO_EXTENSIONS=1 - export YARL_NO_EXTENSIONS=1 -fi - -python_venv process_xml "$1" diff --git a/ci/format_pre.sh b/ci/format_pre.sh index 831e57ca4a298..08808386b16b2 100755 --- a/ci/format_pre.sh +++ b/ci/format_pre.sh @@ -53,7 +53,7 @@ CURRENT=configs bazel run "${BAZEL_BUILD_OPTIONS[@]}" //configs:example_configs_validation CURRENT=python -bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix "$(pwd)" +bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix CURRENT=extensions bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/extensions:extensions_check diff --git a/ci/osx-build-config/extensions_build_config.bzl b/ci/osx-build-config/extensions_build_config.bzl index 40c8fee0685e8..379d6748e5a95 100644 --- a/ci/osx-build-config/extensions_build_config.bzl +++ b/ci/osx-build-config/extensions_build_config.bzl @@ -14,3 +14,4 @@ EXTENSIONS = { WINDOWS_EXTENSIONS = {} EXTENSION_CONFIG_VISIBILITY = ["//:extension_config"] EXTENSION_PACKAGE_VISIBILITY = ["//:extension_library"] +CONTRIB_EXTENSION_PACKAGE_VISIBILITY = ["//:contrib_library"] diff --git a/ci/repokitteh/modules/newcontributor.star b/ci/repokitteh/modules/newcontributor.star deleted file mode 100644 index 865e5e90c7624..0000000000000 --- a/ci/repokitteh/modules/newcontributor.star +++ /dev/null @@ -1,43 +0,0 @@ - -NEW_CONTRIBUTOR_MESSAGE = """ -Hi @%s, welcome and thank you for your contribution. - -We will try to review your Pull Request as quickly as possible. - -In the meantime, please take a look at the [contribution guidelines](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md) if you have not done so already. - -""" - -DRAFT_MESSAGE = """ -As a reminder, PRs marked as draft will not be automatically assigned reviewers, -or be handled by maintainer-oncall triage. 
- -Please mark your PR as ready when you want it to be reviewed! -""" - - -def get_pr_author_association(issue_number): - return github.call( - method="GET", - path="repos/envoyproxy/envoy/pulls/%s" % issue_number)["json"]["author_association"] - -def is_newcontributor(issue_number): - return ( - get_pr_author_association(issue_number) - in ["NONE", "FIRST_TIME_CONTRIBUTOR", "FIRST_TIMER"]) - -def should_message_newcontributor(action, issue_number): - return ( - action == 'opened' - and is_newcontributor(issue_number)) - -def send_newcontributor_message(sender): - github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender) - -def _pr(action, issue_number, sender, config, draft): - if should_message_newcontributor(action, issue_number): - send_newcontributor_message(sender) - if action == 'opened' and draft: - github.issue_create_comment(DRAFT_MESSAGE) - -handlers.pull_request(func=_pr) diff --git a/ci/repokitteh/modules/newpr.star b/ci/repokitteh/modules/newpr.star index 865e5e90c7624..4c4797f442262 100644 --- a/ci/repokitteh/modules/newpr.star +++ b/ci/repokitteh/modules/newpr.star @@ -34,7 +34,14 @@ def should_message_newcontributor(action, issue_number): def send_newcontributor_message(sender): github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender) -def _pr(action, issue_number, sender, config, draft): +def is_envoy_repo(repo_owner, repo_name): + return ( + repo_owner == "envoyproxy" + and repo_name == "envoy") + +def _pr(action, issue_number, sender, config, draft, repo_owner, repo_name): + if not is_envoy_repo(repo_owner, repo_name): + return if should_message_newcontributor(action, issue_number): send_newcontributor_message(sender) if action == 'opened' and draft: diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh index 68d8afc4d94c3..e72082a9cf0b5 100755 --- a/ci/upload_gcs_artifact.sh +++ b/ci/upload_gcs_artifact.sh @@ -18,9 +18,13 @@ if [ ! 
-d "${SOURCE_DIRECTORY}" ]; then exit 1 fi -if [[ "$BUILD_REASON" == "PullRequest" ]]; then - # non-main upload to the last commit sha (first 7 chars) in the developers branch - UPLOAD_PATH="$(git log --pretty=%P -n 1 | cut -d' ' -f2 | head -c7)" +if [[ "$BUILD_REASON" == "PullRequest" ]] || [[ "$TARGET_SUFFIX" == "docs" ]]; then + # upload to the last commit sha (first 7 chars), either + # - docs build on main + # -> https://storage.googleapis.com/envoy-postsubmit/$UPLOAD_PATH/docs/envoy-docs-rst.tar.gz + # - PR build (commit sha from the developers branch) + # -> https://storage.googleapis.com/envoy-pr/$UPLOAD_PATH/$TARGET_SUFFIX + UPLOAD_PATH="$(git rev-parse HEAD | head -c7)" else UPLOAD_PATH="${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}" fi diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index 67ff0eb409949..eed32c1218868 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -103,7 +103,7 @@ if [[ $TEST_TARGETS == "//test/..." ]]; then bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" $TEST_TARGETS --test_tag_filters=-skip_on_windows,-fails_on_${FAIL_GROUP} --build_tests_only echo "running flaky test reporting script" - "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //ci/flaky_test:process_xml "$CI_TARGET" # Build tests that are known flaky or failing to ensure no compilation regressions bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=fails_on_${FAIL_GROUP} --build_tests_only diff --git a/configs/encapsulate_in_http1_connect.yaml b/configs/encapsulate_in_http1_connect.yaml index a11a997880327..f8f9a6bc4a668 100644 --- a/configs/encapsulate_in_http1_connect.yaml +++ b/configs/encapsulate_in_http1_connect.yaml @@ -1,7 +1,7 @@ # This configuration takes incoming data on port 10000 and encapsulates it in a CONNECT # request which is sent upstream port 10001. 
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst -# and running `curl --x 127.0.0.1:10000 https://www.google.com` +# and running `curl -x 127.0.0.1:10000 https://www.google.com` admin: address: diff --git a/configs/encapsulate_in_http2_connect.yaml b/configs/encapsulate_in_http2_connect.yaml index abe84ecc86e29..1f985457ab2dd 100644 --- a/configs/encapsulate_in_http2_connect.yaml +++ b/configs/encapsulate_in_http2_connect.yaml @@ -1,7 +1,7 @@ # This configuration takes incoming data on port 10000 and encapsulates it in a CONNECT # request which is sent upstream port 10001. # It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst -# and running `curl --x 127.0.0.1:10000 https://www.google.com` +# and running `curl -x 127.0.0.1:10000 https://www.google.com` admin: address: diff --git a/configs/encapsulate_in_http2_post.yaml b/configs/encapsulate_in_http2_post.yaml index 61353a97a886e..d3979c393ad7f 100644 --- a/configs/encapsulate_in_http2_post.yaml +++ b/configs/encapsulate_in_http2_post.yaml @@ -1,7 +1,7 @@ # This configuration takes incoming data on port 10000 and encapsulates it in a POST # request which is sent upstream port 10001. 
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst -# and running `curl --x 127.0.0.1:10000 https://www.google.com` +# and running `curl -x 127.0.0.1:10000 https://www.google.com` admin: address: diff --git a/configs/google_com_auto_http3_upstream_proxy.yaml b/configs/google_com_auto_http3_upstream_proxy.yaml new file mode 100644 index 0000000000000..8767f87a59ef8 --- /dev/null +++ b/configs/google_com_auto_http3_upstream_proxy.yaml @@ -0,0 +1,72 @@ +# An example config which accepts HTTP/1 requests over TCP and forwards them to google using HTTP/3 +admin: + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 9901 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + scheme_header_transformation: + scheme_to_overwrite: https + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + host_rewrite_literal: www.google.com + cluster: service_google + http_filters: + - name: alternate_protocols_cache + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.alternate_protocols_cache.v3.FilterConfig + alternate_protocols_cache_options: + name: default_alternate_protocols_cache + - name: envoy.filters.http.router + clusters: + - name: service_google + connect_timeout: 30s + type: LOGICAL_DNS + # Comment out the following line to test on v6 networks + dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service_google + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: www.google.com + port_value: 443 + 
typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + auto_config: + http3_protocol_options: {} + alternate_protocols_cache_options: + name: default_alternate_protocols_cache + common_http_protocol_options: + idle_timeout: 1s + transport_socket: + name: envoy.transport_sockets.quic + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport + upstream_tls_context: + sni: www.google.com diff --git a/configs/requirements.txt b/configs/requirements.txt index 1cd69909b9962..7e65450464ab1 100644 --- a/configs/requirements.txt +++ b/configs/requirements.txt @@ -1,6 +1,6 @@ -Jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 +Jinja2==3.0.2 \ + --hash=sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c \ + --hash=sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45 MarkupSafe==2.0.1 \ --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ diff --git a/contrib/BUILD b/contrib/BUILD index aa0691c6142a8..ceedb6dfcaacb 100644 --- a/contrib/BUILD +++ b/contrib/BUILD @@ -1,6 +1,14 @@ +load("//bazel:utils.bzl", "json_data") +load(":contrib_build_config.bzl", "CONTRIB_EXTENSIONS") + licenses(["notice"]) # Apache 2 exports_files([ "extensions_metadata.yaml", "contrib_build_config.bzl", ]) + +json_data( + name = "contrib_extensions_build_config", + data = CONTRIB_EXTENSIONS, +) diff --git a/contrib/all_contrib_extensions.bzl b/contrib/all_contrib_extensions.bzl index 5a450825fd033..3862d7976a1f1 100644 --- a/contrib/all_contrib_extensions.bzl +++ b/contrib/all_contrib_extensions.bzl @@ -1,4 +1,14 @@ load(":contrib_build_config.bzl", 
"CONTRIB_EXTENSIONS") -def envoy_all_contrib_extensions(): - return [v + "_envoy_extension" for v in CONTRIB_EXTENSIONS.values()] +# linter requires indirection for @bazel_tools definitions +def envoy_contrib_linux_x86_64_constraints(): + return [ + "@bazel_tools//platforms:linux", + "@bazel_tools//platforms:x86_64", + ] + +ARM64_SKIP_CONTRIB_TARGETS = ["envoy.tls.key_providers.cryptomb"] +PPC_SKIP_CONTRIB_TARGETS = ["envoy.tls.key_providers.cryptomb"] + +def envoy_all_contrib_extensions(denylist = []): + return [v + "_envoy_extension" for k, v in CONTRIB_EXTENSIONS.items() if not k in denylist] diff --git a/contrib/contrib_build_config.bzl b/contrib/contrib_build_config.bzl index f27001d971be8..3a9987a910e49 100644 --- a/contrib/contrib_build_config.bzl +++ b/contrib/contrib_build_config.bzl @@ -16,4 +16,17 @@ CONTRIB_EXTENSIONS = { "envoy.filters.network.mysql_proxy": "//contrib/mysql_proxy/filters/network/source:config", "envoy.filters.network.postgres_proxy": "//contrib/postgres_proxy/filters/network/source:config", "envoy.filters.network.rocketmq_proxy": "//contrib/rocketmq_proxy/filters/network/source:config", + + # + # Sip proxy + # + + "envoy.filters.network.sip_proxy": "//contrib/sip_proxy/filters/network/source:config", + "envoy.filters.sip.router": "//contrib/sip_proxy/filters/network/source/router:config", + + # + # Private key providers + # + + "envoy.tls.key_providers.cryptomb": "//contrib/cryptomb/private_key_providers/source:config", } diff --git a/contrib/cryptomb/private_key_providers/source/BUILD b/contrib/cryptomb/private_key_providers/source/BUILD new file mode 100644 index 0000000000000..9e30bef90b5a0 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/BUILD @@ -0,0 +1,113 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", + "envoy_cc_library", + "envoy_cmake", + "envoy_contrib_package", +) +load( + "//contrib:all_contrib_extensions.bzl", + "envoy_contrib_linux_x86_64_constraints", +) + 
+licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cmake( + name = "ipp-crypto", + cache_entries = { + "BORINGSSL": "on", + }, + defines = [ + "OPENSSL_USE_STATIC_LIBS=TRUE", + ], + lib_source = "@com_github_intel_ipp_crypto_crypto_mb//:all", + out_static_libs = ["libcrypto_mb.a"], + tags = ["skip_on_windows"], + target_compatible_with = envoy_contrib_linux_x86_64_constraints(), + visibility = ["//visibility:private"], + working_directory = "sources/ippcp/crypto_mb", + deps = ["@boringssl//:ssl"], +) + +envoy_cc_library( + name = "ipp_crypto_wrapper_lib", + hdrs = ["ipp_crypto.h"] + select({ + "//bazel:linux_x86_64": [ + "ipp_crypto_impl.h", + ], + "//conditions:default": [ + ], + }), + defines = select({ + "//bazel:linux_x86_64": [], + "//conditions:default": [ + "IPP_CRYPTO_DISABLED=1", + ], + }), + external_deps = ["ssl"], + repository = "@envoy", + deps = select({ + "//bazel:linux_x86_64": [ + ":ipp-crypto", + ], + "//conditions:default": [], + }), +) + +envoy_cc_library( + name = "cryptomb_private_key_provider_lib", + srcs = [ + "cryptomb_private_key_provider.cc", + ], + hdrs = [ + "cryptomb_private_key_provider.h", + ], + external_deps = ["ssl"], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + ":ipp_crypto_wrapper_lib", + "//envoy/api:api_interface", + "//envoy/event:dispatcher_interface", + "//envoy/registry", + "//envoy/server:transport_socket_config_interface", + "//envoy/singleton:manager_interface", + "//envoy/ssl/private_key:private_key_config_interface", + "//envoy/ssl/private_key:private_key_interface", + "//source/common/common:logger_lib", + "//source/common/common:thread_lib", + "//source/common/config:datasource_lib", + "@envoy_api//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_contrib_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + defines = select({ + "//bazel:linux_x86_64": [], + "//conditions:default": [ + 
"IPP_CRYPTO_DISABLED=1", + ], + }), + deps = [ + "//envoy/registry", + "//envoy/ssl/private_key:private_key_config_interface", + "//envoy/ssl/private_key:private_key_interface", + "//source/common/common:logger_lib", + "//source/common/config:utility_lib", + "//source/common/protobuf:utility_lib", + "@envoy_api//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + ] + select({ + "//bazel:linux_x86_64": [ + ":cryptomb_private_key_provider_lib", + ":ipp_crypto_wrapper_lib", + ], + "//conditions:default": [ + ], + }), +) diff --git a/contrib/cryptomb/private_key_providers/source/config.cc b/contrib/cryptomb/private_key_providers/source/config.cc new file mode 100644 index 0000000000000..713dc733b6fb7 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/config.cc @@ -0,0 +1,57 @@ +#include "contrib/cryptomb/private_key_providers/source/config.h" + +#include + +#include "envoy/registry/registry.h" +#include "envoy/server/transport_socket_config.h" + +#include "source/common/common/logger.h" +#include "source/common/config/utility.h" +#include "source/common/protobuf/message_validator_impl.h" +#include "source/common/protobuf/utility.h" + +#ifndef IPP_CRYPTO_DISABLED +#include "contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h" +#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h" +#endif + +#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.h" +#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.validate.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +Ssl::PrivateKeyMethodProviderSharedPtr +CryptoMbPrivateKeyMethodFactory::createPrivateKeyMethodProviderInstance( + const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& proto_config, + 
Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) { + ProtobufTypes::MessagePtr message = + std::make_unique(); + + Config::Utility::translateOpaqueConfig(proto_config.typed_config(), + ProtobufMessage::getNullValidationVisitor(), *message); + const envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig + conf = + MessageUtil::downcastAndValidate( + *message, private_key_provider_context.messageValidationVisitor()); + Ssl::PrivateKeyMethodProviderSharedPtr provider = nullptr; +#ifdef IPP_CRYPTO_DISABLED + throw EnvoyException("X86_64 architecture is required for cryptomb provider."); +#else + IppCryptoSharedPtr ipp = std::make_shared(); + provider = + std::make_shared(conf, private_key_provider_context, ipp); +#endif + return provider; +} + +REGISTER_FACTORY(CryptoMbPrivateKeyMethodFactory, Ssl::PrivateKeyMethodProviderInstanceFactory); + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/source/config.h b/contrib/cryptomb/private_key_providers/source/config.h new file mode 100644 index 0000000000000..d72a395da189d --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/config.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/ssl/private_key/private_key.h" +#include "envoy/ssl/private_key/private_key_config.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +class CryptoMbPrivateKeyMethodFactory : public Ssl::PrivateKeyMethodProviderInstanceFactory, + public Logger::Loggable { +public: + // Ssl::PrivateKeyMethodProviderInstanceFactory + Ssl::PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProviderInstance( + const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& message, + 
Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) override; + std::string name() const override { return "cryptomb"; }; +}; + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.cc b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.cc new file mode 100644 index 0000000000000..78312b9636f9b --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.cc @@ -0,0 +1,606 @@ +#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h" + +#include + +#include "envoy/registry/registry.h" +#include "envoy/server/transport_socket_config.h" + +#include "source/common/config/datasource.h" + +#include "openssl/ec.h" +#include "openssl/ssl.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +CryptoMbContext::CryptoMbContext(Event::Dispatcher& dispatcher, + Ssl::PrivateKeyConnectionCallbacks& cb) + : status_(RequestStatus::Retry), dispatcher_(dispatcher), cb_(cb) {} + +void CryptoMbContext::scheduleCallback(enum RequestStatus status) { + schedulable_ = dispatcher_.createSchedulableCallback([this, status]() -> void { + // The status can't be set beforehand, because the callback asserts + // if someone else races to call doHandshake() and the status goes to + // HandshakeComplete. + setStatus(status); + this->cb_.onPrivateKeyMethodComplete(); + }); + schedulable_->scheduleCallbackNextIteration(); +} + +bool CryptoMbRsaContext::rsaInit(const uint8_t* in, size_t in_len) { + if (rsa_ == nullptr) { + return false; + } + + // Initialize the values with the RSA key. 
+ size_t in_buf_size = in_len; + out_len_ = RSA_size(rsa_.get()); + + if (out_len_ > in_buf_size) { + in_buf_size = out_len_; + } + + RSA_get0_key(rsa_.get(), &n_, &e_, &d_); + RSA_get0_factors(rsa_.get(), &p_, &q_); + RSA_get0_crt_params(rsa_.get(), &dmp1_, &dmq1_, &iqmp_); + + if (p_ == nullptr || q_ == nullptr || dmp1_ == nullptr || dmq1_ == nullptr || iqmp_ == nullptr) { + return false; + } + + in_buf_ = std::make_unique(in_buf_size); + memcpy(in_buf_.get(), in, in_len); // NOLINT(safe-memcpy) + + return true; +} + +namespace { + +int calculateDigest(const EVP_MD* md, const uint8_t* in, size_t in_len, unsigned char* hash, + unsigned int* hash_len) { + bssl::ScopedEVP_MD_CTX ctx; + + // Calculate the message digest for signing. + if (!EVP_DigestInit_ex(ctx.get(), md, nullptr) || !EVP_DigestUpdate(ctx.get(), in, in_len) || + !EVP_DigestFinal_ex(ctx.get(), hash, hash_len)) { + return 0; + } + return 1; +} + +ssl_private_key_result_t ecdsaPrivateKeySignInternal(CryptoMbPrivateKeyConnection* ops, + uint8_t* out, size_t* out_len, size_t max_out, + uint16_t signature_algorithm, + const uint8_t* in, size_t in_len) { + unsigned char hash[EVP_MAX_MD_SIZE]; + unsigned int hash_len; + unsigned int out_len_unsigned; + + if (ops == nullptr) { + return ssl_private_key_failure; + } + + const EVP_MD* md = SSL_get_signature_algorithm_digest(signature_algorithm); + if (md == nullptr) { + return ssl_private_key_failure; + } + + if (!calculateDigest(md, in, in_len, hash, &hash_len)) { + return ssl_private_key_failure; + } + + bssl::UniquePtr pkey = ops->getPrivateKey(); + if (pkey == nullptr) { + return ssl_private_key_failure; + } + + if (EVP_PKEY_id(pkey.get()) != SSL_get_signature_algorithm_key_type(signature_algorithm)) { + return ssl_private_key_failure; + } + + bssl::UniquePtr ec_key(EVP_PKEY_get1_EC_KEY(pkey.get())); + if (ec_key == nullptr) { + return ssl_private_key_failure; + } + + if (max_out < ECDSA_size(ec_key.get())) { + return ssl_private_key_failure; + } + + // 
Borrow "out" because it has been already initialized to the max_out size. + if (!ECDSA_sign(0, hash, hash_len, out, &out_len_unsigned, ec_key.get())) { + return ssl_private_key_failure; + } + + if (out_len_unsigned > max_out) { + return ssl_private_key_failure; + } + *out_len = out_len_unsigned; + return ssl_private_key_success; +} + +ssl_private_key_result_t ecdsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t* out_len, + size_t max_out, uint16_t signature_algorithm, + const uint8_t* in, size_t in_len) { + return ssl == nullptr ? ssl_private_key_failure + : ecdsaPrivateKeySignInternal( + static_cast(SSL_get_ex_data( + ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())), + out, out_len, max_out, signature_algorithm, in, in_len); +} + +ssl_private_key_result_t ecdsaPrivateKeyDecrypt(SSL*, uint8_t*, size_t*, size_t, const uint8_t*, + size_t) { + // Expecting to get only signing requests. + return ssl_private_key_failure; +} + +ssl_private_key_result_t rsaPrivateKeySignInternal(CryptoMbPrivateKeyConnection* ops, uint8_t*, + size_t*, size_t, uint16_t signature_algorithm, + const uint8_t* in, size_t in_len) { + + ssl_private_key_result_t status = ssl_private_key_failure; + if (ops == nullptr) { + return status; + } + + bssl::UniquePtr pkey = ops->getPrivateKey(); + + // Check if the SSL instance has correct data attached to it. + if (EVP_PKEY_id(pkey.get()) != SSL_get_signature_algorithm_key_type(signature_algorithm)) { + return status; + } + + bssl::UniquePtr rsa(EVP_PKEY_get1_RSA(pkey.get())); + if (rsa == nullptr) { + return status; + } + + const EVP_MD* md = SSL_get_signature_algorithm_digest(signature_algorithm); + if (md == nullptr) { + return status; + } + + unsigned char hash[EVP_MAX_MD_SIZE]; + unsigned int hash_len; + if (!calculateDigest(md, in, in_len, hash, &hash_len)) { + return status; + } + + uint8_t* msg; + size_t msg_len; + int prefix_allocated = 0; + + // Add RSA padding to the the hash. Supported types are `PSS` and `PKCS1`. 
+ if (SSL_is_signature_algorithm_rsa_pss(signature_algorithm)) { + msg_len = RSA_size(rsa.get()); + // We have to do manual memory management here, because BoringSSL tells in `prefix_allocated` + // variable whether or not memory needs to be freed. + msg = static_cast(OPENSSL_malloc(msg_len)); + if (msg == nullptr) { + return status; + } + prefix_allocated = 1; + if (!RSA_padding_add_PKCS1_PSS_mgf1(rsa.get(), msg, hash, md, nullptr, -1)) { + OPENSSL_free(msg); + return status; + } + } else { + if (!RSA_add_pkcs1_prefix(&msg, &msg_len, &prefix_allocated, EVP_MD_type(md), hash, hash_len)) { + if (prefix_allocated) { + OPENSSL_free(msg); + } + return status; + } + } + + // Create MB context which will be used for this particular + // signing/decryption. + CryptoMbRsaContextSharedPtr mb_ctx = + std::make_shared(std::move(pkey), ops->dispatcher_, ops->cb_); + + if (!mb_ctx->rsaInit(msg, msg_len)) { + if (prefix_allocated) { + OPENSSL_free(msg); + } + return status; + } + + if (prefix_allocated) { + OPENSSL_free(msg); + } + + ops->addToQueue(mb_ctx); + status = ssl_private_key_retry; + return status; +} + +ssl_private_key_result_t rsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t* out_len, size_t max_out, + uint16_t signature_algorithm, const uint8_t* in, + size_t in_len) { + return ssl == nullptr ? ssl_private_key_failure + : rsaPrivateKeySignInternal( + static_cast(SSL_get_ex_data( + ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())), + out, out_len, max_out, signature_algorithm, in, in_len); +} + +ssl_private_key_result_t rsaPrivateKeyDecryptInternal(CryptoMbPrivateKeyConnection* ops, uint8_t*, + size_t*, size_t, const uint8_t* in, + size_t in_len) { + + if (ops == nullptr) { + return ssl_private_key_failure; + } + + bssl::UniquePtr pkey = ops->getPrivateKey(); + + // Check if the SSL instance has correct data attached to it. 
+ if (pkey == nullptr) { + return ssl_private_key_failure; + } + + CryptoMbRsaContextSharedPtr mb_ctx = + std::make_shared(std::move(pkey), ops->dispatcher_, ops->cb_); + + if (!mb_ctx->rsaInit(in, in_len)) { + return ssl_private_key_failure; + } + + ops->addToQueue(mb_ctx); + return ssl_private_key_retry; +} + +ssl_private_key_result_t rsaPrivateKeyDecrypt(SSL* ssl, uint8_t* out, size_t* out_len, + size_t max_out, const uint8_t* in, size_t in_len) { + return ssl == nullptr ? ssl_private_key_failure + : rsaPrivateKeyDecryptInternal( + static_cast(SSL_get_ex_data( + ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())), + out, out_len, max_out, in, in_len); +} + +ssl_private_key_result_t privateKeyCompleteInternal(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out) { + if (ops == nullptr) { + return ssl_private_key_failure; + } + + // Check if the MB operation is ready yet. This can happen if someone calls + // the top-level SSL function too early. The op status is only set from this + // thread. + if (ops->mb_ctx_->getStatus() == RequestStatus::Retry) { + return ssl_private_key_retry; + } + + // If this point is reached, the MB processing must be complete. + + // See if the operation failed. + if (ops->mb_ctx_->getStatus() != RequestStatus::Success) { + ops->logWarnMsg("private key operation failed."); + return ssl_private_key_failure; + } + + *out_len = ops->mb_ctx_->out_len_; + + if (*out_len > max_out) { + return ssl_private_key_failure; + } + + memcpy(out, ops->mb_ctx_->out_buf_, *out_len); // NOLINT(safe-memcpy) + + return ssl_private_key_success; +} + +ssl_private_key_result_t privateKeyComplete(SSL* ssl, uint8_t* out, size_t* out_len, + size_t max_out) { + return ssl == nullptr ? 
ssl_private_key_failure + : privateKeyCompleteInternal( + static_cast(SSL_get_ex_data( + ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())), + out, out_len, max_out); +} + +} // namespace + +// External linking, meant for testing without SSL context. +ssl_private_key_result_t privateKeyCompleteForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out) { + return privateKeyCompleteInternal(ops, out, out_len, max_out); +} +ssl_private_key_result_t ecdsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out, + uint16_t signature_algorithm, const uint8_t* in, + size_t in_len) { + return ecdsaPrivateKeySignInternal(ops, out, out_len, max_out, signature_algorithm, in, in_len); +} +ssl_private_key_result_t rsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out, + uint16_t signature_algorithm, const uint8_t* in, + size_t in_len) { + return rsaPrivateKeySignInternal(ops, out, out_len, max_out, signature_algorithm, in, in_len); +} +ssl_private_key_result_t rsaPrivateKeyDecryptForTest(CryptoMbPrivateKeyConnection* ops, + uint8_t* out, size_t* out_len, size_t max_out, + const uint8_t* in, size_t in_len) { + return rsaPrivateKeyDecryptInternal(ops, out, out_len, max_out, in, in_len); +} + +CryptoMbQueue::CryptoMbQueue(std::chrono::milliseconds poll_delay, enum KeyType type, int keysize, + IppCryptoSharedPtr ipp, Event::Dispatcher& d) + : us_(std::chrono::duration_cast(poll_delay)), type_(type), + key_size_(keysize), ipp_(ipp), + timer_(d.createTimer([this]() -> void { processRequests(); })) { + request_queue_.reserve(MULTIBUFF_BATCH); +} + +void CryptoMbQueue::startTimer() { timer_->enableHRTimer(us_); } + +void CryptoMbQueue::stopTimer() { timer_->disableTimer(); } + +void CryptoMbQueue::addAndProcessEightRequests(CryptoMbContextSharedPtr mb_ctx) { + // Add the request to the processing queue. 
+ ASSERT(request_queue_.size() < MULTIBUFF_BATCH); + request_queue_.push_back(mb_ctx); + + if (request_queue_.size() == MULTIBUFF_BATCH) { + // There are eight requests in the queue and we can process them. + stopTimer(); + ENVOY_LOG(debug, "processing directly 8 requests"); + processRequests(); + } else if (request_queue_.size() == 1) { + // First request in the queue, start the queue timer. + startTimer(); + } +} + +void CryptoMbQueue::processRequests() { + if (type_ == KeyType::Rsa) { + processRsaRequests(); + } + request_queue_.clear(); +} + +void CryptoMbQueue::processRsaRequests() { + + const unsigned char* rsa_priv_from[MULTIBUFF_BATCH] = {nullptr}; + unsigned char* rsa_priv_to[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_lenstra_e[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_lenstra_n[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_priv_p[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_priv_q[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_priv_dmp1[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_priv_dmq1[MULTIBUFF_BATCH] = {nullptr}; + const BIGNUM* rsa_priv_iqmp[MULTIBUFF_BATCH] = {nullptr}; + + /* Build arrays of pointers for call */ + for (unsigned req_num = 0; req_num < request_queue_.size(); req_num++) { + CryptoMbRsaContextSharedPtr mb_ctx = + std::static_pointer_cast(request_queue_[req_num]); + rsa_priv_from[req_num] = mb_ctx->in_buf_.get(); + rsa_priv_to[req_num] = mb_ctx->out_buf_; + rsa_priv_p[req_num] = mb_ctx->p_; + rsa_priv_q[req_num] = mb_ctx->q_; + rsa_priv_dmp1[req_num] = mb_ctx->dmp1_; + rsa_priv_dmq1[req_num] = mb_ctx->dmq1_; + rsa_priv_iqmp[req_num] = mb_ctx->iqmp_; + } + + ENVOY_LOG(debug, "Multibuffer RSA process {} requests", request_queue_.size()); + + uint32_t rsa_sts = + ipp_->mbxRsaPrivateCrtSslMb8(rsa_priv_from, rsa_priv_to, rsa_priv_p, rsa_priv_q, + rsa_priv_dmp1, rsa_priv_dmq1, rsa_priv_iqmp, key_size_); + + enum RequestStatus status[MULTIBUFF_BATCH] = {RequestStatus::Retry}; + + for (unsigned req_num = 0; 
req_num < request_queue_.size(); req_num++) { + CryptoMbRsaContextSharedPtr mb_ctx = + std::static_pointer_cast(request_queue_[req_num]); + if (ipp_->mbxGetSts(rsa_sts, req_num)) { + ENVOY_LOG(debug, "Multibuffer RSA request {} success", req_num); + status[req_num] = RequestStatus::Success; + } else { + ENVOY_LOG(debug, "Multibuffer RSA request {} failure", req_num); + status[req_num] = RequestStatus::Error; + } + + // `Lenstra` check (validate that we get the same result back). + rsa_priv_from[req_num] = rsa_priv_to[req_num]; + rsa_priv_to[req_num] = mb_ctx->lenstra_to_; + rsa_lenstra_e[req_num] = mb_ctx->e_; + rsa_lenstra_n[req_num] = mb_ctx->n_; + } + + rsa_sts = + ipp_->mbxRsaPublicSslMb8(rsa_priv_from, rsa_priv_to, rsa_lenstra_e, rsa_lenstra_n, key_size_); + + for (unsigned req_num = 0; req_num < request_queue_.size(); req_num++) { + CryptoMbRsaContextSharedPtr mb_ctx = + std::static_pointer_cast(request_queue_[req_num]); + enum RequestStatus ctx_status; + if (ipp_->mbxGetSts(rsa_sts, req_num)) { + if (CRYPTO_memcmp(mb_ctx->in_buf_.get(), rsa_priv_to[req_num], mb_ctx->out_len_) != 0) { + status[req_num] = RequestStatus::Error; + } + // else keep the previous status from the private key operation + } else { + status[req_num] = RequestStatus::Error; + } + + ctx_status = status[req_num]; + mb_ctx->scheduleCallback(ctx_status); + } +} + +CryptoMbPrivateKeyConnection::CryptoMbPrivateKeyConnection(Ssl::PrivateKeyConnectionCallbacks& cb, + Event::Dispatcher& dispatcher, + bssl::UniquePtr pkey, + CryptoMbQueue& queue) + : queue_(queue), dispatcher_(dispatcher), cb_(cb), pkey_(std::move(pkey)) {} + +void CryptoMbPrivateKeyMethodProvider::registerPrivateKeyMethod( + SSL* ssl, Ssl::PrivateKeyConnectionCallbacks& cb, Event::Dispatcher& dispatcher) { + + if (SSL_get_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex()) != nullptr) { + throw EnvoyException("Not registering the CryptoMb provider twice for same context"); + } + + 
ASSERT(tls_->currentThreadRegistered(), "Current thread needs to be registered."); + + CryptoMbQueue& queue = tls_->get()->queue_; + + CryptoMbPrivateKeyConnection* ops = + new CryptoMbPrivateKeyConnection(cb, dispatcher, bssl::UpRef(pkey_), queue); + SSL_set_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex(), ops); +} + +void CryptoMbPrivateKeyConnection::addToQueue(CryptoMbContextSharedPtr mb_ctx) { + mb_ctx_ = mb_ctx; + queue_.addAndProcessEightRequests(mb_ctx_); +} + +bool CryptoMbPrivateKeyMethodProvider::checkFips() { + // `ipp-crypto` library is not fips-certified at the moment + // (https://github.com/intel/ipp-crypto#certification). + return false; +} + +Ssl::BoringSslPrivateKeyMethodSharedPtr +CryptoMbPrivateKeyMethodProvider::getBoringSslPrivateKeyMethod() { + return method_; +} + +void CryptoMbPrivateKeyMethodProvider::unregisterPrivateKeyMethod(SSL* ssl) { + CryptoMbPrivateKeyConnection* ops = static_cast( + SSL_get_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())); + SSL_set_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex(), nullptr); + delete ops; +} + +CryptoMbPrivateKeyMethodProvider::CryptoMbPrivateKeyMethodProvider( + const envoy::extensions::private_key_providers::cryptomb::v3alpha:: + CryptoMbPrivateKeyMethodConfig& conf, + Server::Configuration::TransportSocketFactoryContext& factory_context, IppCryptoSharedPtr ipp) + : api_(factory_context.api()), + tls_(ThreadLocal::TypedSlot::makeUnique(factory_context.threadLocal())) { + + if (!ipp->mbxIsCryptoMbApplicable(0)) { + throw EnvoyException("Multi-buffer CPU instructions not available."); + } + + std::chrono::milliseconds poll_delay = + std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(conf, poll_delay, 200)); + + std::string private_key = + Config::DataSource::read(conf.private_key(), false, factory_context.api()); + + bssl::UniquePtr bio( + BIO_new_mem_buf(const_cast(private_key.data()), private_key.size())); + + bssl::UniquePtr 
pkey(PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr, nullptr)); + if (pkey == nullptr) { + throw EnvoyException("Failed to read private key."); + } + + method_ = std::make_shared(); + + int key_size; + + if (EVP_PKEY_id(pkey.get()) == EVP_PKEY_RSA) { + ENVOY_LOG(debug, "CryptoMb key type: RSA"); + key_type_ = KeyType::Rsa; + + method_->sign = rsaPrivateKeySign; + method_->decrypt = rsaPrivateKeyDecrypt; + method_->complete = privateKeyComplete; + + RSA* rsa = EVP_PKEY_get0_RSA(pkey.get()); + + switch (RSA_bits(rsa)) { + case 1024: + key_size = 1024; + break; + case 2048: + key_size = 2048; + break; + case 3072: + key_size = 3072; + break; + case 4096: + key_size = 4096; + break; + default: + throw EnvoyException("Only RSA keys of 1024, 2048, 3072, and 4096 bits are supported."); + } + + // If longer keys are ever supported, remember to change the signature buffer to be larger. + ASSERT(key_size / 8 <= CryptoMbContext::MAX_SIGNATURE_SIZE); + + BIGNUM e_check; + // const BIGNUMs, memory managed by BoringSSL in RSA key structure. 
+ const BIGNUM *e, *n, *d; + RSA_get0_key(rsa, &n, &e, &d); + BN_init(&e_check); + BN_add_word(&e_check, 65537); + if (e == nullptr || BN_ucmp(e, &e_check) != 0) { + BN_free(&e_check); + throw EnvoyException("Only RSA keys with \"e\" parameter value 65537 are allowed, because " + "we can validate the signatures using multi-buffer instructions."); + } + BN_free(&e_check); + } else if (EVP_PKEY_id(pkey.get()) == EVP_PKEY_EC) { + ENVOY_LOG(debug, "CryptoMb key type: ECDSA"); + key_type_ = KeyType::Ec; + + method_->sign = ecdsaPrivateKeySign; + method_->decrypt = ecdsaPrivateKeyDecrypt; + method_->complete = privateKeyComplete; + + const EC_GROUP* ecdsa_group = EC_KEY_get0_group(EVP_PKEY_get0_EC_KEY(pkey.get())); + if (ecdsa_group == nullptr) { + throw EnvoyException("Invalid ECDSA key."); + } + BIGNUMConstPtr order(EC_GROUP_get0_order(ecdsa_group)); + if (EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) { + throw EnvoyException("Only P-256 ECDSA keys are supported."); + } + if (BN_num_bits(order.get()) < 160) { + throw EnvoyException("Too few significant bits."); + } + key_size = EC_GROUP_get_degree(ecdsa_group); + ASSERT(key_size == 256); + } else { + throw EnvoyException("Not supported key type, only EC and RSA are supported."); + } + + pkey_ = std::move(pkey); + + enum KeyType key_type = key_type_; + + // Create a single queue for every worker thread to avoid locking. 
+ tls_->set([poll_delay, key_type, key_size, ipp](Event::Dispatcher& d) { + ENVOY_LOG(debug, "Created CryptoMb Queue for thread {}", d.name()); + return std::make_shared(poll_delay, key_type, key_size, ipp, d); + }); +} + +namespace { +int createIndex() { + int index = SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr); + RELEASE_ASSERT(index >= 0, "Failed to get SSL user data index."); + return index; +} +} // namespace + +int CryptoMbPrivateKeyMethodProvider::connectionIndex() { + CONSTRUCT_ON_FIRST_USE(int, createIndex()); +} + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h new file mode 100644 index 0000000000000..b1a8a65bcc16a --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h @@ -0,0 +1,189 @@ +#pragma once + +#include "envoy/api/api.h" +#include "envoy/event/dispatcher.h" +#include "envoy/ssl/private_key/private_key.h" +#include "envoy/ssl/private_key/private_key_config.h" +#include "envoy/thread_local/thread_local.h" + +#include "source/common/common/c_smart_ptr.h" +#include "source/common/common/logger.h" + +#include "contrib/cryptomb/private_key_providers/source/ipp_crypto.h" +#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +namespace { +void dontFreeBN(const BIGNUM*) {} +} // namespace +using BIGNUMConstPtr = CSmartPtr; + +enum class RequestStatus { Retry, Success, Error }; +enum class KeyType { Rsa, Ec }; + +// CryptoMbContext holds the actual data to be signed or encrypted. 
It also has a +// reference to the worker thread dispatcher for communicating that it has +// has ran the `AVX-512` code and the result is ready to be used. +class CryptoMbContext { +public: + static constexpr ssize_t MAX_SIGNATURE_SIZE = 512; + + CryptoMbContext(Event::Dispatcher& dispatcher, Ssl::PrivateKeyConnectionCallbacks& cb); + virtual ~CryptoMbContext() = default; + + void setStatus(RequestStatus status) { status_ = status; } + enum RequestStatus getStatus() { return status_; } + void scheduleCallback(enum RequestStatus status); + + // Buffer length is the same as the max signature length (4096 bits = 512 bytes) + unsigned char out_buf_[MAX_SIGNATURE_SIZE]; + // The real length of the signature. + size_t out_len_{}; + // Incoming data buffer. + std::unique_ptr in_buf_; + +private: + // Whether the decryption / signing is ready. + enum RequestStatus status_ {}; + + Event::Dispatcher& dispatcher_; + Ssl::PrivateKeyConnectionCallbacks& cb_; + // For scheduling the callback to the next dispatcher cycle. + Event::SchedulableCallbackPtr schedulable_{}; +}; + +// CryptoMbRsaContext is a CryptoMbContext which holds the extra RSA parameters and has +// custom initialization function. It also has a separate buffer for RSA result +// verification. +class CryptoMbRsaContext : public CryptoMbContext { +public: + CryptoMbRsaContext(bssl::UniquePtr pkey, Event::Dispatcher& dispatcher, + Ssl::PrivateKeyConnectionCallbacks& cb) + : CryptoMbContext(dispatcher, cb), rsa_(EVP_PKEY_get1_RSA(pkey.get())) {} + bool rsaInit(const uint8_t* in, size_t in_len); + + // RSA key. + bssl::UniquePtr rsa_{}; + // RSA parameters. Const pointers, which will contain values whose memory is + // managed within BoringSSL RSA key structure, so not wrapped in smart + // pointers. + const BIGNUM* d_{}; + const BIGNUM* e_{}; + const BIGNUM* n_{}; + const BIGNUM* p_{}; + const BIGNUM* q_{}; + const BIGNUM* dmp1_{}; + const BIGNUM* dmq1_{}; + const BIGNUM* iqmp_{}; + + // Buffer for `Lenstra` check. 
+ unsigned char lenstra_to_[MAX_SIGNATURE_SIZE]; +}; + +using CryptoMbContextSharedPtr = std::shared_ptr; +using CryptoMbRsaContextSharedPtr = std::shared_ptr; + +// CryptoMbQueue maintains the request queue and is able to process it. +class CryptoMbQueue : public Logger::Loggable { +public: + static constexpr uint32_t MULTIBUFF_BATCH = 8; + + CryptoMbQueue(std::chrono::milliseconds poll_delay, enum KeyType type, int keysize, + IppCryptoSharedPtr ipp, Event::Dispatcher& d); + void addAndProcessEightRequests(CryptoMbContextSharedPtr mb_ctx); + +private: + void processRequests(); + void processRsaRequests(); + void startTimer(); + void stopTimer(); + + // Polling delay. + std::chrono::microseconds us_{}; + + // Queue for the requests. + std::vector request_queue_; + + // Key size and key type allowed for this particular queue. + const enum KeyType type_; + int key_size_{}; + + // Thread local data slot. + ThreadLocal::SlotPtr slot_{}; + + // Crypto operations library interface. + IppCryptoSharedPtr ipp_{}; + + // Timer to trigger queue processing if eight requests are not received in time. + Event::TimerPtr timer_{}; +}; + +// CryptoMbPrivateKeyConnection maintains the data needed by a given SSL +// connection. 
+class CryptoMbPrivateKeyConnection : public Logger::Loggable { +public: + CryptoMbPrivateKeyConnection(Ssl::PrivateKeyConnectionCallbacks& cb, + Event::Dispatcher& dispatcher, bssl::UniquePtr pkey, + CryptoMbQueue& queue); + virtual ~CryptoMbPrivateKeyConnection() = default; + + bssl::UniquePtr getPrivateKey() { return bssl::UpRef(pkey_); }; + void logDebugMsg(std::string msg) { ENVOY_LOG(debug, "CryptoMb: {}", msg); } + void logWarnMsg(std::string msg) { ENVOY_LOG(warn, "CryptoMb: {}", msg); } + void addToQueue(CryptoMbContextSharedPtr mb_ctx); + + CryptoMbQueue& queue_; + Event::Dispatcher& dispatcher_; + Ssl::PrivateKeyConnectionCallbacks& cb_; + CryptoMbContextSharedPtr mb_ctx_{}; + +private: + Event::FileEventPtr ssl_async_event_{}; + bssl::UniquePtr pkey_; +}; + +// CryptoMbPrivateKeyMethodProvider handles the private key method operations for +// an SSL socket. +class CryptoMbPrivateKeyMethodProvider : public virtual Ssl::PrivateKeyMethodProvider, + public Logger::Loggable { +public: + CryptoMbPrivateKeyMethodProvider( + const envoy::extensions::private_key_providers::cryptomb::v3alpha:: + CryptoMbPrivateKeyMethodConfig& config, + Server::Configuration::TransportSocketFactoryContext& private_key_provider_context, + IppCryptoSharedPtr ipp); + + // Ssl::PrivateKeyMethodProvider + void registerPrivateKeyMethod(SSL* ssl, Ssl::PrivateKeyConnectionCallbacks& cb, + Event::Dispatcher& dispatcher) override; + void unregisterPrivateKeyMethod(SSL* ssl) override; + bool checkFips() override; + Ssl::BoringSslPrivateKeyMethodSharedPtr getBoringSslPrivateKeyMethod() override; + + static int connectionIndex(); + +private: + // Thread local data containing a single queue per worker thread. 
+ struct ThreadLocalData : public ThreadLocal::ThreadLocalObject { + ThreadLocalData(std::chrono::milliseconds poll_delay, enum KeyType type, int keysize, + IppCryptoSharedPtr ipp, Event::Dispatcher& d) + : queue_(poll_delay, type, keysize, ipp, d){}; + CryptoMbQueue queue_; + }; + + Ssl::BoringSslPrivateKeyMethodSharedPtr method_{}; + Api::Api& api_; + bssl::UniquePtr pkey_; + enum KeyType key_type_; + + ThreadLocal::TypedSlotPtr tls_; +}; + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/source/ipp_crypto.h b/contrib/cryptomb/private_key_providers/source/ipp_crypto.h new file mode 100644 index 0000000000000..d33d02270b4aa --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/ipp_crypto.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/common/pure.h" + +#include "openssl/ssl.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +class IppCrypto { +public: + virtual ~IppCrypto() = default; + + virtual int mbxIsCryptoMbApplicable(uint64_t features) PURE; + virtual uint32_t mbxRsaPrivateCrtSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8], + const BIGNUM* const p_pa[8], const BIGNUM* const q_pa[8], + const BIGNUM* const dp_pa[8], + const BIGNUM* const dq_pa[8], + const BIGNUM* const iq_pa[8], + int expected_rsa_bitsize) PURE; + virtual uint32_t mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8], + const BIGNUM* const e_pa[8], const BIGNUM* const n_pa[8], + int expected_rsa_bitsize) PURE; + virtual bool mbxGetSts(uint32_t status, unsigned req_num) PURE; +}; + +using IppCryptoSharedPtr = std::shared_ptr; + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h 
b/contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h new file mode 100644 index 0000000000000..e27576ead61af --- /dev/null +++ b/contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h @@ -0,0 +1,41 @@ +#pragma once + +#include "contrib/cryptomb/private_key_providers/source/ipp_crypto.h" +#include "crypto_mb/cpu_features.h" +#include "crypto_mb/ec_nistp256.h" +#include "crypto_mb/rsa.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +class IppCryptoImpl : public virtual IppCrypto { +public: + int mbxIsCryptoMbApplicable(uint64_t features) override { + return ::mbx_is_crypto_mb_applicable(features); + } + uint32_t mbxRsaPrivateCrtSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8], + const BIGNUM* const p_pa[8], const BIGNUM* const q_pa[8], + const BIGNUM* const dp_pa[8], const BIGNUM* const dq_pa[8], + const BIGNUM* const iq_pa[8], int expected_rsa_bitsize) override { + return ::mbx_rsa_private_crt_ssl_mb8(from_pa, to_pa, p_pa, q_pa, dp_pa, dq_pa, iq_pa, + expected_rsa_bitsize); + } + uint32_t mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8], + const BIGNUM* const e_pa[8], const BIGNUM* const n_pa[8], + int expected_rsa_bitsize) override { + return ::mbx_rsa_public_ssl_mb8(from_pa, to_pa, e_pa, n_pa, expected_rsa_bitsize); + } + bool mbxGetSts(uint32_t status, unsigned req_num) override { + if (MBX_GET_STS(status, req_num) == MBX_STATUS_OK) { + return true; + } + return false; + }; +}; + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/test/BUILD b/contrib/cryptomb/private_key_providers/test/BUILD new file mode 100644 index 0000000000000..a48b639149f81 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/BUILD @@ -0,0 +1,77 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_cc_test_library", + 
"envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_test_library( + name = "test_fake_factory", + srcs = [ + "fake_factory.cc", + ], + hdrs = [ + "fake_factory.h", + ], + external_deps = ["ssl"], + deps = [ + "//contrib/cryptomb/private_key_providers/source:cryptomb_private_key_provider_lib", + "//contrib/cryptomb/private_key_providers/source:ipp_crypto_wrapper_lib", + "//envoy/api:api_interface", + "//envoy/event:dispatcher_interface", + "//envoy/server:transport_socket_config_interface", + "//envoy/ssl/private_key:private_key_config_interface", + "//envoy/ssl/private_key:private_key_interface", + "//source/common/config:datasource_lib", + "//source/common/config:utility_lib", + "//source/common/protobuf:utility_lib", + "@envoy_api//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "config_test", + srcs = [ + "config_test.cc", + ], + data = [ + "//contrib/cryptomb/private_key_providers/test/test_data:certs", + ], + deps = [ + ":test_fake_factory", + "//source/common/common:random_generator_lib", + "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/stats:stats_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:environment_lib", + "//test/test_common:registry_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "ops_test", + srcs = [ + "ops_test.cc", + ], + data = [ + "//contrib/cryptomb/private_key_providers/test/test_data:certs", + ], + deps = [ + ":test_fake_factory", + "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", + "//test/mocks/stats:stats_mocks", + "//test/test_common:environment_lib", + 
"//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/contrib/cryptomb/private_key_providers/test/config_test.cc b/contrib/cryptomb/private_key_providers/test/config_test.cc new file mode 100644 index 0000000000000..365e1024ee4ad --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/config_test.cc @@ -0,0 +1,274 @@ +#include + +#include "source/common/common/random_generator.h" +#include "source/extensions/transport_sockets/tls/private_key/private_key_manager_impl.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/mocks/common.h" +#include "test/mocks/server/transport_socket_factory_context.h" +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/registry.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h" +#include "fake_factory.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider +parsePrivateKeyProviderFromV3Yaml(const std::string& yaml_string) { + envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider private_key_provider; + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml_string), private_key_provider); + return private_key_provider; +} + +class CryptoMbConfigTest : public Event::TestUsingSimulatedTime, public testing::Test { +public: + CryptoMbConfigTest() : api_(Api::createApiForTest(store_, time_system_)) { + ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); + ON_CALL(factory_context_, threadLocal()).WillByDefault(ReturnRef(tls_)); + ON_CALL(factory_context_, 
sslContextManager()).WillByDefault(ReturnRef(context_manager_)); + ON_CALL(context_manager_, privateKeyMethodManager()) + .WillByDefault(ReturnRef(private_key_method_manager_)); + } + + Ssl::PrivateKeyMethodProviderSharedPtr createWithConfig(std::string yaml, + bool supported_instruction_set = true) { + FakeCryptoMbPrivateKeyMethodFactory cryptomb_factory(supported_instruction_set); + Registry::InjectFactory + cryptomb_private_key_method_factory(cryptomb_factory); + + return factory_context_.sslContextManager() + .privateKeyMethodManager() + .createPrivateKeyMethodProvider(parsePrivateKeyProviderFromV3Yaml(yaml), factory_context_); + } + + Event::SimulatedTimeSystem time_system_; + NiceMock factory_context_; + Stats::IsolatedStoreImpl store_; + Api::ApiPtr api_; + NiceMock tls_; + NiceMock context_manager_; + TransportSockets::Tls::PrivateKeyMethodManagerImpl private_key_method_manager_; +}; + +TEST_F(CryptoMbConfigTest, CreateRsa1024) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem" } +)EOF"; + + Ssl::PrivateKeyMethodProviderSharedPtr provider = createWithConfig(yaml); + EXPECT_NE(nullptr, provider); + EXPECT_EQ(false, provider->checkFips()); + Ssl::BoringSslPrivateKeyMethodSharedPtr method = provider->getBoringSslPrivateKeyMethod(); + EXPECT_NE(nullptr, method); + + ssl_private_key_result_t res; + + res = method->sign(nullptr, nullptr, nullptr, 0, 0, nullptr, 0); + EXPECT_EQ(res, ssl_private_key_failure); + res = method->decrypt(nullptr, nullptr, nullptr, 0, nullptr, 0); + EXPECT_EQ(res, ssl_private_key_failure); + res = method->complete(nullptr, nullptr, nullptr, 0); + EXPECT_EQ(res, ssl_private_key_failure); +} + +TEST_F(CryptoMbConfigTest, CreateRsa2048) { + const std::string yaml = 
R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem" } +)EOF"; + + EXPECT_NE(nullptr, createWithConfig(yaml)); +} + +TEST_F(CryptoMbConfigTest, CreateRsa2048WithExponent3) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem" } +)EOF"; + + EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException, + "Only RSA keys with \"e\" parameter value 65537 are allowed, because " + "we can validate the signatures using multi-buffer instructions."); +} + +TEST_F(CryptoMbConfigTest, CreateRsa3072) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem" } +)EOF"; + + EXPECT_NE(nullptr, createWithConfig(yaml)); +} + +TEST_F(CryptoMbConfigTest, CreateRsa4096) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" } +)EOF"; + + EXPECT_NE(nullptr, createWithConfig(yaml)); +} + +TEST_F(CryptoMbConfigTest, CreateRsa512) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem" } +)EOF"; + + EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException, + "Only RSA keys of 1024, 2048, 3072, and 4096 bits are supported."); +} + +TEST_F(CryptoMbConfigTest, CreateEcdsaP256) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem" } +)EOF"; + + Ssl::PrivateKeyMethodProviderSharedPtr provider = createWithConfig(yaml); + EXPECT_NE(nullptr, provider); + EXPECT_EQ(false, provider->checkFips()); + Ssl::BoringSslPrivateKeyMethodSharedPtr method = provider->getBoringSslPrivateKeyMethod(); + EXPECT_NE(nullptr, method); + + ssl_private_key_result_t res; + + res = method->sign(nullptr, nullptr, nullptr, 0, 0, nullptr, 0); + EXPECT_EQ(res, ssl_private_key_failure); + res = method->decrypt(nullptr, nullptr, nullptr, 0, nullptr, 0); + EXPECT_EQ(res, ssl_private_key_failure); + res = method->complete(nullptr, nullptr, nullptr, 0); + EXPECT_EQ(res, ssl_private_key_failure); +} + +TEST_F(CryptoMbConfigTest, CreateEcdsaP256Inline) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: + inline_string: | + -----BEGIN PRIVATE KEY----- + MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgIxp5QZ3YFaT8s+CR + rqUqeYSe5D9APgBZbyCvAkO2/JChRANCAARM53DFLHORcSyBpu5zpaG7/HfLXT8H + r1RaoGEiH9pi3MIKg1H+b8EaM1M4wURT2yXMjuvogQ6ixs0B1mvRkZnL + -----END PRIVATE KEY----- +)EOF"; + + 
EXPECT_NE(nullptr, createWithConfig(yaml)); +} + +TEST_F(CryptoMbConfigTest, CreateEcdsaP384) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem" } +)EOF"; + + EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException, + "Only P-256 ECDSA keys are supported."); +} + +TEST_F(CryptoMbConfigTest, CreateMissingPrivateKey) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/missing.pem" } +)EOF"; + + EXPECT_THROW(createWithConfig(yaml), EnvoyException); +} + +TEST_F(CryptoMbConfigTest, CreateMissingKey) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0.02s + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException, + "Unexpected DataSource::specifier_case(): 0"); +} + +TEST_F(CryptoMbConfigTest, CreateMissingPollDelay) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" } + )EOF"; + + EXPECT_THROW_WITH_REGEX(createWithConfig(yaml), EnvoyException, + "Proto constraint validation failed"); +} + +TEST_F(CryptoMbConfigTest, CreateZeroPollDelay) { + const std::string yaml = R"EOF( + provider_name: 
cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + poll_delay: 0s + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" } + )EOF"; + + EXPECT_THROW_WITH_REGEX(createWithConfig(yaml), EnvoyException, + "Proto constraint validation failed"); +} + +TEST_F(CryptoMbConfigTest, CreateNotSupportedInstructionSet) { + const std::string yaml = R"EOF( + provider_name: cryptomb + typed_config: + "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig + private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" } + poll_delay: 0.02s + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml, false), EnvoyException, + "Multi-buffer CPU instructions not available."); +} + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/test/fake_factory.cc b/contrib/cryptomb/private_key_providers/test/fake_factory.cc new file mode 100644 index 0000000000000..1fddc745f732f --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/fake_factory.cc @@ -0,0 +1,171 @@ +#include "fake_factory.h" + +#include + +#include "envoy/registry/registry.h" +#include "envoy/server/transport_socket_config.h" + +#include "source/common/config/datasource.h" +#include "source/common/config/utility.h" +#include "source/common/protobuf/message_validator_impl.h" +#include "source/common/protobuf/utility.h" + +#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h" +#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.h" +#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.validate.h" +#include "openssl/rsa.h" +#include 
"openssl/ssl.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +FakeIppCryptoImpl::FakeIppCryptoImpl(bool supported_instruction_set) + : supported_instruction_set_(supported_instruction_set) {} + +FakeIppCryptoImpl::~FakeIppCryptoImpl() { + BN_free(n_); + BN_free(e_); + BN_free(d_); +} + +int FakeIppCryptoImpl::mbxIsCryptoMbApplicable(uint64_t) { + return supported_instruction_set_ ? 1 : 0; +} + +uint32_t FakeIppCryptoImpl::mbxSetSts(uint32_t status, unsigned req_num, bool success) { + if (success) { + // clear bit req_num + return status & ~(1UL << req_num); + } + // set bit req_num + return status | (1UL << req_num); +} + +bool FakeIppCryptoImpl::mbxGetSts(uint32_t status, unsigned req_num) { + // return true if bit req_num if not set + return !((status >> req_num) & 1UL); +} + +uint32_t FakeIppCryptoImpl::mbxRsaPrivateCrtSslMb8( + const uint8_t* const from_pa[8], uint8_t* const to_pa[8], const BIGNUM* const p_pa[8], + const BIGNUM* const q_pa[8], const BIGNUM* const dp_pa[8], const BIGNUM* const dq_pa[8], + const BIGNUM* const iq_pa[8], int expected_rsa_bitsize) { + + uint32_t status = 0xff; + + for (int i = 0; i < 8; i++) { + RSA* rsa; + size_t out_len = 0; + int ret; + + if (from_pa[i] == nullptr) { + break; + } + + rsa = RSA_new(); + + RSA_set0_factors(rsa, BN_dup(p_pa[i]), BN_dup(q_pa[i])); + RSA_set0_crt_params(rsa, BN_dup(dp_pa[i]), BN_dup(dq_pa[i]), BN_dup(iq_pa[i])); + + // The real `mbx_rsa_private_crt_ssl_mb8` doesn't require these parameters to + // be set, but BoringSSL does. That's why they are provided out-of-band in + // the factory initialization. + RSA_set0_key(rsa, BN_dup(n_), BN_dup(e_), BN_dup(d_)); + + // From the docs: "Memory buffers of the plain- and `ciphertext` must be `ceil(rsaBitlen/8)` + // bytes length." 
+ ret = RSA_sign_raw(rsa, &out_len, to_pa[i], expected_rsa_bitsize / 8, from_pa[i], + expected_rsa_bitsize / 8, RSA_NO_PADDING); + + RSA_free(rsa); + + status = mbxSetSts(status, i, inject_errors_ ? !ret : ret); + } + + UNREFERENCED_PARAMETER(expected_rsa_bitsize); + + return status; +} + +uint32_t FakeIppCryptoImpl::mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], + uint8_t* const to_pa[8], const BIGNUM* const e_pa[8], + const BIGNUM* const n_pa[8], + int expected_rsa_bitsize) { + uint32_t status = 0xff; + + for (int i = 0; i < 8; i++) { + RSA* rsa; + size_t out_len = 0; + int ret; + + if (e_pa[i] == nullptr) { + break; + } + + rsa = RSA_new(); + + RSA_set0_key(rsa, BN_dup(n_pa[i]), BN_dup(e_pa[i]), BN_dup(d_)); + + ret = RSA_verify_raw(rsa, &out_len, to_pa[i], expected_rsa_bitsize / 8, from_pa[i], + expected_rsa_bitsize / 8, RSA_NO_PADDING); + + RSA_free(rsa); + + status = mbxSetSts(status, i, inject_errors_ ? !ret : ret); + } + + UNREFERENCED_PARAMETER(expected_rsa_bitsize); + + return status; +} + +FakeCryptoMbPrivateKeyMethodFactory::FakeCryptoMbPrivateKeyMethodFactory( + bool supported_instruction_set) + : supported_instruction_set_(supported_instruction_set) {} + +Ssl::PrivateKeyMethodProviderSharedPtr +FakeCryptoMbPrivateKeyMethodFactory::createPrivateKeyMethodProviderInstance( + const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& proto_config, + Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) { + ProtobufTypes::MessagePtr message = + std::make_unique(); + + Config::Utility::translateOpaqueConfig(proto_config.typed_config(), + ProtobufMessage::getNullValidationVisitor(), *message); + const envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig + conf = + MessageUtil::downcastAndValidate( + *message, private_key_provider_context.messageValidationVisitor()); + + std::shared_ptr fakeIpp = + std::make_shared(supported_instruction_set_); + + // We need to get more 
RSA key params in order to be able to use BoringSSL signing functions. + std::string private_key = + Config::DataSource::read(conf.private_key(), false, private_key_provider_context.api()); + + bssl::UniquePtr bio( + BIO_new_mem_buf(const_cast(private_key.data()), private_key.size())); + + bssl::UniquePtr pkey(PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr, nullptr)); + if (pkey != nullptr && EVP_PKEY_id(pkey.get()) == EVP_PKEY_RSA) { + const BIGNUM *e, *n, *d; + RSA* rsa = EVP_PKEY_get0_RSA(pkey.get()); + RSA_get0_key(rsa, &n, &e, &d); + fakeIpp->setRsaKey(n, e, d); + } + + IppCryptoSharedPtr ipp = std::dynamic_pointer_cast(fakeIpp); + + return std::make_shared(conf, private_key_provider_context, + ipp); +} + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/test/fake_factory.h b/contrib/cryptomb/private_key_providers/test/fake_factory.h new file mode 100644 index 0000000000000..46fa5d3049e24 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/fake_factory.h @@ -0,0 +1,65 @@ +#pragma once + +#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/ssl/private_key/private_key.h" +#include "envoy/ssl/private_key/private_key_config.h" + +#include "contrib/cryptomb/private_key_providers/source/ipp_crypto.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +class FakeIppCryptoImpl : public virtual IppCrypto { +public: + FakeIppCryptoImpl(bool supported_instruction_set); + ~FakeIppCryptoImpl() override; + + int mbxIsCryptoMbApplicable(uint64_t features) override; + uint32_t mbxRsaPrivateCrtSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8], + const BIGNUM* const p_pa[8], const BIGNUM* const q_pa[8], + const BIGNUM* const dp_pa[8], const BIGNUM* const dq_pa[8], + const BIGNUM* const iq_pa[8], int expected_rsa_bitsize) override; + uint32_t 
mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8], + const BIGNUM* const e_pa[8], const BIGNUM* const n_pa[8], + int expected_rsa_bitsize) override; + bool mbxGetSts(uint32_t status, unsigned req_num) override; + + void setRsaKey(const BIGNUM* n, const BIGNUM* e, const BIGNUM* d) { + n_ = BN_dup(n); + e_ = BN_dup(e); + d_ = BN_dup(d); + }; + + void injectErrors(bool enabled) { inject_errors_ = enabled; } + +private: + uint32_t mbxSetSts(uint32_t status, unsigned req_num, bool success); + + bool supported_instruction_set_; + BIGNUM* n_{}; + BIGNUM* e_{}; + BIGNUM* d_{}; + + bool inject_errors_{}; +}; + +class FakeCryptoMbPrivateKeyMethodFactory : public Ssl::PrivateKeyMethodProviderInstanceFactory { +public: + FakeCryptoMbPrivateKeyMethodFactory(bool supported_instruction_set); + + // Ssl::PrivateKeyMethodProviderInstanceFactory + Ssl::PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProviderInstance( + const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& message, + Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) override; + std::string name() const override { return "cryptomb"; }; + +private: + bool supported_instruction_set_; +}; + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/test/ops_test.cc b/contrib/cryptomb/private_key_providers/test/ops_test.cc new file mode 100644 index 0000000000000..0756d5869a8fe --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/ops_test.cc @@ -0,0 +1,436 @@ +#include + +#include "source/extensions/transport_sockets/tls/private_key/private_key_manager_impl.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/test_common/environment.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include 
"contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h" +#include "fake_factory.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace PrivateKeyMethodProvider { +namespace CryptoMb { + +class TestCallbacks : public Envoy::Ssl::PrivateKeyConnectionCallbacks { +public: + void onPrivateKeyMethodComplete() override{ + + }; +}; + +// Testing interface +ssl_private_key_result_t privateKeyCompleteForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out); +ssl_private_key_result_t ecdsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out, + uint16_t signature_algorithm, const uint8_t* in, + size_t in_len); +ssl_private_key_result_t rsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out, + size_t* out_len, size_t max_out, + uint16_t signature_algorithm, const uint8_t* in, + size_t in_len); +ssl_private_key_result_t rsaPrivateKeyDecryptForTest(CryptoMbPrivateKeyConnection* ops, + uint8_t* out, size_t* out_len, size_t max_out, + const uint8_t* in, size_t in_len); + +bssl::UniquePtr makeRsaKey() { + std::string file = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem")); + bssl::UniquePtr bio(BIO_new_mem_buf(file.data(), file.size())); + + bssl::UniquePtr key(EVP_PKEY_new()); + + RSA* rsa = PEM_read_bio_RSAPrivateKey(bio.get(), nullptr, nullptr, nullptr); + RELEASE_ASSERT(rsa != nullptr, "PEM_read_bio_RSAPrivateKey failed."); + RELEASE_ASSERT(1 == EVP_PKEY_assign_RSA(key.get(), rsa), "EVP_PKEY_assign_RSA failed."); + return key; +} + +bssl::UniquePtr makeEcdsaKey() { + std::string file = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir " + "}}/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem")); + bssl::UniquePtr bio(BIO_new_mem_buf(file.data(), 
file.size())); + + bssl::UniquePtr key(EVP_PKEY_new()); + + EC_KEY* ec = PEM_read_bio_ECPrivateKey(bio.get(), nullptr, nullptr, nullptr); + + RELEASE_ASSERT(ec != nullptr, "PEM_read_bio_ECPrivateKey failed."); + RELEASE_ASSERT(1 == EVP_PKEY_assign_EC_KEY(key.get(), ec), "EVP_PKEY_assign_EC_KEY failed."); + return key; +} + +TEST(CryptoMbProviderTest, TestEcdsaSigning) { + Event::SimulatedTimeSystem time_system; + Stats::TestUtil::TestStore server_stats_store; + Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system); + Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + bssl::UniquePtr pkey = makeEcdsaKey(); + std::shared_ptr fakeIpp = std::make_shared(true); + + CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Ec, 256, fakeIpp, *dispatcher); + + size_t in_len = 32; + uint8_t in[32] = {0x7f}; + size_t out_len = 0; + uint8_t out[128] = {0}; + + ssl_private_key_result_t res; + TestCallbacks cbs; + + // First request + CryptoMbPrivateKeyConnection op(cbs, *dispatcher, bssl::UpRef(pkey), queue); + res = ecdsaPrivateKeySignForTest(&op, out, &out_len, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256, in, + in_len); + EXPECT_EQ(res, ssl_private_key_success); +} + +TEST(CryptoMbProviderTest, TestRsaPkcs1Signing) { + Event::SimulatedTimeSystem time_system; + Stats::TestUtil::TestStore server_stats_store; + Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system); + Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + bssl::UniquePtr pkey = makeRsaKey(); + std::shared_ptr fakeIpp = std::make_shared(true); + RSA* rsa = EVP_PKEY_get0_RSA(pkey.get()); + const BIGNUM *e, *n, *d; + RSA_get0_key(rsa, &n, &e, &d); + fakeIpp->setRsaKey(n, e, d); + + CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher); + + size_t in_len = 32; + uint8_t in[32] = {0x7f}; + + ssl_private_key_result_t res; + TestCallbacks cbs[8]; + + // First request + CryptoMbPrivateKeyConnection 
op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue); + res = + rsaPrivateKeySignForTest(&op0, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128); + // No processing done yet after first request + EXPECT_EQ(res, ssl_private_key_retry); + + // Second request + CryptoMbPrivateKeyConnection op1(cbs[1], *dispatcher, bssl::UpRef(pkey), queue); + res = + rsaPrivateKeySignForTest(&op1, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128); + // No processing done yet after second request + EXPECT_EQ(res, ssl_private_key_retry); + + // Six more requests + CryptoMbPrivateKeyConnection op2(cbs[2], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op3(cbs[3], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op4(cbs[4], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op5(cbs[5], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op6(cbs[6], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op7(cbs[7], *dispatcher, bssl::UpRef(pkey), queue); + res = + rsaPrivateKeySignForTest(&op2, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = + rsaPrivateKeySignForTest(&op3, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = + rsaPrivateKeySignForTest(&op4, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = + rsaPrivateKeySignForTest(&op5, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = + rsaPrivateKeySignForTest(&op6, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, 
ssl_private_key_retry); + res = + rsaPrivateKeySignForTest(&op7, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + size_t out_len = 0; + uint8_t out[128] = {0}; + + res = privateKeyCompleteForTest(&op0, out, &out_len, 128); + // Since the status is set only from the event loop (which is not run) this should be still + // "retry". The cryptographic result is present anyway. + EXPECT_EQ(res, ssl_private_key_retry); + + op0.mb_ctx_->setStatus(RequestStatus::Success); + res = privateKeyCompleteForTest(&op0, out, &out_len, 128); + EXPECT_EQ(res, ssl_private_key_success); + EXPECT_NE(out_len, 0); +} + +TEST(CryptoMbProviderTest, TestRsaPssSigning) { + Event::SimulatedTimeSystem time_system; + Stats::TestUtil::TestStore server_stats_store; + Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system); + Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + bssl::UniquePtr pkey = makeRsaKey(); + std::shared_ptr fakeIpp = std::make_shared(true); + RSA* rsa = EVP_PKEY_get0_RSA(pkey.get()); + const BIGNUM *e, *n, *d; + RSA_get0_key(rsa, &n, &e, &d); + fakeIpp->setRsaKey(n, e, d); + + CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher); + + size_t in_len = 32; + uint8_t in[32] = {0x7f}; + + ssl_private_key_result_t res; + TestCallbacks cbs[8]; + + // First request + CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue); + res = rsaPrivateKeySignForTest(&op0, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128); + // No processing done yet after first request + EXPECT_EQ(res, ssl_private_key_retry); + + // Second request + CryptoMbPrivateKeyConnection op1(cbs[1], *dispatcher, bssl::UpRef(pkey), queue); + res = rsaPrivateKeySignForTest(&op1, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + 
EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128); + // No processing done yet after second request + EXPECT_EQ(res, ssl_private_key_retry); + + // Six more requests + CryptoMbPrivateKeyConnection op2(cbs[2], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op3(cbs[3], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op4(cbs[4], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op5(cbs[5], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op6(cbs[6], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op7(cbs[7], *dispatcher, bssl::UpRef(pkey), queue); + res = rsaPrivateKeySignForTest(&op2, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeySignForTest(&op3, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeySignForTest(&op4, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeySignForTest(&op5, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeySignForTest(&op6, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeySignForTest(&op7, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + size_t out_len = 0; + uint8_t out[128] = {0}; + + res = privateKeyCompleteForTest(&op0, out, &out_len, 128); + // Since the status is set only from the event loop (which is not run) this should be still + // "retry". The cryptographic result is present anyway. 
+ EXPECT_EQ(res, ssl_private_key_retry); + + op0.mb_ctx_->setStatus(RequestStatus::Success); + res = privateKeyCompleteForTest(&op0, out, &out_len, 128); + EXPECT_EQ(res, ssl_private_key_success); + EXPECT_NE(out_len, 0); +} + +TEST(CryptoMbProviderTest, TestRsaDecrypt) { + Event::SimulatedTimeSystem time_system; + Stats::TestUtil::TestStore server_stats_store; + Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system); + Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + bssl::UniquePtr pkey = makeRsaKey(); + std::shared_ptr fakeIpp = std::make_shared(true); + RSA* rsa = EVP_PKEY_get0_RSA(pkey.get()); + const BIGNUM *e, *n, *d; + RSA_get0_key(rsa, &n, &e, &d); + fakeIpp->setRsaKey(n, e, d); + + CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher); + + size_t in_len = 32; + uint8_t in[32] = {0x7f}; + + ssl_private_key_result_t res; + TestCallbacks cbs[8]; + + // First request + CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue); + res = rsaPrivateKeyDecryptForTest(&op0, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128); + // No processing done yet after first request + EXPECT_EQ(res, ssl_private_key_retry); + + // Second request + CryptoMbPrivateKeyConnection op1(cbs[1], *dispatcher, bssl::UpRef(pkey), queue); + res = rsaPrivateKeyDecryptForTest(&op1, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128); + // No processing done yet after second request + EXPECT_EQ(res, ssl_private_key_retry); + + // Six more requests + CryptoMbPrivateKeyConnection op2(cbs[2], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op3(cbs[3], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op4(cbs[4], *dispatcher, bssl::UpRef(pkey), queue); + 
CryptoMbPrivateKeyConnection op5(cbs[5], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op6(cbs[6], *dispatcher, bssl::UpRef(pkey), queue); + CryptoMbPrivateKeyConnection op7(cbs[7], *dispatcher, bssl::UpRef(pkey), queue); + res = rsaPrivateKeyDecryptForTest(&op2, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeyDecryptForTest(&op3, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeyDecryptForTest(&op4, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeyDecryptForTest(&op5, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeyDecryptForTest(&op6, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + res = rsaPrivateKeyDecryptForTest(&op7, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + size_t out_len = 0; + uint8_t out[128] = {0}; + + res = privateKeyCompleteForTest(&op0, out, &out_len, 128); + // Since the status is set only from the event loop (which is not run) this should be still + // "retry". The cryptographic result is present anyway. 
+ EXPECT_EQ(res, ssl_private_key_retry); + + op0.mb_ctx_->setStatus(RequestStatus::Success); + res = privateKeyCompleteForTest(&op0, out, &out_len, 128); + EXPECT_EQ(res, ssl_private_key_success); + EXPECT_NE(out_len, 0); +} + +TEST(CryptoMbProviderTest, TestErrors) { + Event::SimulatedTimeSystem time_system; + Stats::TestUtil::TestStore server_stats_store; + Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system); + Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + bssl::UniquePtr pkey = makeEcdsaKey(); + bssl::UniquePtr rsa_pkey = makeRsaKey(); + std::shared_ptr fakeIpp = std::make_shared(true); + + CryptoMbQueue ec_queue(std::chrono::milliseconds(200), KeyType::Ec, 256, fakeIpp, *dispatcher); + CryptoMbQueue rsa_queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher); + + size_t in_len = 32; + uint8_t in[32] = {0x7f}; + + ssl_private_key_result_t res; + TestCallbacks cb; + + CryptoMbPrivateKeyConnection op_ec(cb, *dispatcher, bssl::UpRef(pkey), ec_queue); + CryptoMbPrivateKeyConnection op_rsa(cb, *dispatcher, bssl::UpRef(rsa_pkey), rsa_queue); + + // no operation defined + res = ecdsaPrivateKeySignForTest(nullptr, nullptr, nullptr, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256, + in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + res = + rsaPrivateKeySignForTest(nullptr, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + res = rsaPrivateKeyDecryptForTest(nullptr, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + + // Unknown signature algorithm + res = ecdsaPrivateKeySignForTest(&op_ec, nullptr, nullptr, 128, 1234, in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + res = rsaPrivateKeySignForTest(&op_rsa, nullptr, nullptr, 128, 1234, in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + + // Wrong signature algorithm + res = ecdsaPrivateKeySignForTest(&op_ec, nullptr, nullptr, 128, 
SSL_SIGN_RSA_PSS_SHA256, in, + in_len); + EXPECT_EQ(res, ssl_private_key_failure); + res = rsaPrivateKeySignForTest(&op_rsa, nullptr, nullptr, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256, + in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + + // Wrong operation type + res = ecdsaPrivateKeySignForTest(&op_rsa, nullptr, nullptr, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256, + in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + res = + rsaPrivateKeySignForTest(&op_ec, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); + res = rsaPrivateKeyDecryptForTest(&op_ec, nullptr, nullptr, 128, in, in_len); + EXPECT_EQ(res, ssl_private_key_failure); +} + +TEST(CryptoMbProviderTest, TestRSATimer) { + Event::SimulatedTimeSystem time_system; + Stats::TestUtil::TestStore server_stats_store; + Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system); + Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + bssl::UniquePtr pkey = makeRsaKey(); + std::shared_ptr fakeIpp = std::make_shared(true); + RSA* rsa = EVP_PKEY_get0_RSA(pkey.get()); + const BIGNUM *e, *n, *d; + RSA_get0_key(rsa, &n, &e, &d); + fakeIpp->setRsaKey(n, e, d); + + CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher); + + size_t in_len = 32; + uint8_t in[32] = {0x7f}; + + ssl_private_key_result_t res; + TestCallbacks cbs[8]; + + // First request + CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue); + res = + rsaPrivateKeySignForTest(&op0, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128); + // No processing done yet after first request + EXPECT_EQ(res, ssl_private_key_retry); + + time_system.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher, + Event::Dispatcher::RunType::NonBlock); + + size_t out_len = 0; + uint8_t out[128] = {0}; + + res = 
privateKeyCompleteForTest(&op0, out, &out_len, 128); + EXPECT_EQ(res, ssl_private_key_success); + EXPECT_NE(out_len, 0); + + // Add crypto library errors + fakeIpp->injectErrors(true); + + CryptoMbPrivateKeyConnection op1(cbs[0], *dispatcher, bssl::UpRef(pkey), queue); + res = + rsaPrivateKeySignForTest(&op1, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len); + EXPECT_EQ(res, ssl_private_key_retry); + + res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128); + // No processing done yet after first request + EXPECT_EQ(res, ssl_private_key_retry); + + time_system.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher, + Event::Dispatcher::RunType::NonBlock); + + res = privateKeyCompleteForTest(&op1, out, &out_len, 128); + EXPECT_EQ(res, ssl_private_key_failure); +} + +} // namespace CryptoMb +} // namespace PrivateKeyMethodProvider +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/cryptomb/private_key_providers/test/test_data/BUILD b/contrib/cryptomb/private_key_providers/test/test_data/BUILD new file mode 100644 index 0000000000000..f55a73857b846 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/BUILD @@ -0,0 +1,13 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +filegroup( + name = "certs", + srcs = glob(["*.pem"]), +) diff --git a/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem new file mode 100644 index 0000000000000..60d9e4c83180e --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIMpJw5U66K+DcA963b+/jZYrMrZDjaB0khHSwZte3vYCoAoGCCqGSM49 +AwEHoUQDQgAELp3XvBfkVWQBOKo3ttAaJ6SUaUb8uKqCS504WXHWMO4h89F+nYtC +Ecgl8EiLXXyc86tawKjGdizcCjrKMiFo3A== +-----END EC PRIVATE KEY----- diff --git 
a/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem new file mode 100644 index 0000000000000..9bf5ffe14bb33 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDBE7nkbGPIsiG0S1vajwJkfVtlZM7+KhrN9LSqolHsNAv/t4kGA0Sn3 +McnWqcts9RugBwYFK4EEACKhZANiAAT2tXd7DLnmD9JL+YNYH4+RgBgQSD5DnP90 +Xu8uuOUZwO3ZLdzuf+TRs0MneULXS3fWqBCYo7gNPRdZR40QrT/4dQGpQsDAFl3f +Yg9Un5cxR+XovaseGsnMQoP80majEYA= +-----END EC PRIVATE KEY----- diff --git a/contrib/cryptomb/private_key_providers/test/test_data/generate-keys.sh b/contrib/cryptomb/private_key_providers/test/test_data/generate-keys.sh new file mode 100755 index 0000000000000..95b1e59faf379 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/generate-keys.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +openssl ecparam -name prime256v1 -genkey -noout -out ecdsa-p256.pem +openssl ecparam -name secp384r1 -genkey -noout -out ecdsa-p384.pem +openssl genrsa -out rsa-512.pem 512 +openssl genrsa -out rsa-1024.pem 1024 +openssl genrsa -out rsa-2048.pem 2048 +openssl genrsa -3 -out rsa-2048-exponent-3.pem 2048 +openssl genrsa -out rsa-3072.pem 3072 +openssl genrsa -out rsa-4096.pem 4096 + diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem new file mode 100644 index 0000000000000..38f23d45a246e --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXwIBAAKBgQDX/SburqyA+DDb1Kz24bC1QPqXAZKKk5eUX6kXyU4Xg/532gvb +eX9ie2S/NdQwH9UU4jO2pTdph7R7BJZlqTxRjrIre83mIeZPlsUh4T7LACE19T0p +xvxxjZphRYUNcz+06KRaT91IdscCJkVCHFPD9DquVMyUwp5pjfFEw6eRYQIDAQAB +AoGBAM2FlLs/uOPLxuoXWDJflT8Twp9YHrIAORc1Y3g/1DAqKESxVeEUnnL+iWIs +/WiBkceaaqzcT3r6Z3E7b0TzIXGVctJhOomclH6+NNOtC63WWXh6IFX/9YBDszUS 
+kRsIvOiqDqqAm6VVYpSArJvXFHMMLKvFtqu+AFW8zFkjkrhlAkEA+ZVunIWERDWL +KMxX/BZCkeIH3Rv/TczRrj7WqWPcIIPsBOsvqfL/xI3peagFs3TeLU/o16caWoLS +M7TltvTqtwJBAN2Kn+XiVm7GB9ITwoTNH95n6GL+3fX0jgwUvsygfo/UlhWoIMUh +Rb2Ic/72cDasBrnxIWjKlahqEOJh0YvNLKcCQQCYKP1VmaTovMgJaINfoeaV7/qh +V9dPhEZ1d8QBY2spu6Ph38ygTRCXsXkc/U30eZSWhXhMOYk8kzM56Nh/sVODAkEA +gAMzI9WmVfnt2PD8DFqu2Ie4G0PkI9P9JHP0UC9JEnknhDoTPXVdZAht5lymOKEs +fdMcl/2/foJTYUxeleanrwJBAKpgVmeC6Z8aUiDCJOEgfr9KvvWf77S2+PmgZwFx +lSK/Hz/MDhTHJlSjOin3gpl82VL5cmZywClkWIh1wE8PCvg= +-----END RSA PRIVATE KEY----- diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem new file mode 100644 index 0000000000000..87786c8c63f74 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwOQVkfs063s0I/wq/cBW63pX+6OxMxLXohNdBHCITbnwnI1B +xkEA6FvNWN+xRH2AW/8aiyyYSy38BErJwgOAbA1e9b6knYfZb5IIMVmnx519R7CK +TdYrZMnHbyvN3U2xULJZ+y+gPCnRihZSrL/hXlYHkyt/ef5OSxshyrjxN9RmxBLs +zTus5G+04mHN2RlmnjOALOjXApeHD9YGLddODqp89TUV+LpddI+s+0KFaRUWMxne +pegA+awoGSk4OGwHFbkMz5eIH9CZYCL3d6nt3Q9R48t4wsDGTJaNaAeg5Bv+6+pO +MuP9QteNl3d3ro/8KZOe7vvXDuAy/86tTjbYswIBAwKCAQEAgJgOYVIjR6d4F/1x +/oA58lGP/RfLd2HlFreTWEsFiSagaF4r2YCrRZKI5ep2LakAPVS8XMhlh3P9WDHb +1q0ASAjp+SnDE6/mSmFay5EahROo2nWxiTlyQzEvn3KJPjPLiyGRUh/AKBvhBrmM +cyqWPuQFDMeqUVQ0MhIWhyX2JTccM09IXbmV2WpAijw7encniBB8jy9eEzIkK6yE +HrDBOAN5Cq5MPgcbEZLrnixSKjm6Ti6KUMjhrPfUgfkS64eHtvdtKGoj9nWFqBjA +stmwZyCoYdLaeeuWsiT+m3lGJCH3YKM8dX723OIIUwlsld7kp9I5i6e153HZVow6 +Va+gCwKBgQDg7VUKVHOJdzTRPw+N8xTJEzDsdHu0FhvD5HSZK9vCx05GOo2hC0Qj +1Zn0B+owIaMJmegcIFxoYxSasmZwkZYUyTNoXCa1hrS5D+C7IHZXYlCHhiws0T2v +Ak9fSEKliTov82TzpRCXq6C1NpghF0AIal5cqyUoBNQXn69zMqsKqQKBgQDbicb1 +7DHCpxuC0/fmrlHiPunY4aYVz7CM6d6m0/Jpc1cBKqICkGuRBJlXhhXaCBt1I+vz +DF5GEyPOo80rRYqmuhKAttGcp2uX1B0bSu0N4A3NyuMEOqG1Pw+wNo8SJK7b55B/ 
+3ZTzS+PL5FEztoGcw3nr8lseLuFVXkzimwRd+wKBgQCV844G4vew+iM2Kgpeog3b +YiCdov0iuWfX7aMQx+fXL4mEJwkWB4LCjmair/F1a8IGZpq9auhFl2MRzERLC7lj +MMzwPW8jryMmCpXSFaQ6QYsFBB1zNikfVt+U2tcZBibKokNNGLW6cmsjebrAuiqw +RumTHMNwAzgPv8pMzHIHGwKBgQCSW9n5SCEsb2esjU/vHuFBf0aQlm65NSBd8T8Z +4qGbojoAxxasYEe2AxDlBA6RWrz4wp1MsumEDMKJwojHg7HEfAxVzzZob50P4r4S +MfNelV6JMeytfGvOKgp1ebS2wx89RQr/6Q33h+0ymDYieau916adTDy+yeuOPt3s +Z1g+pwKBgQC5jdeQ5QVHWdpJnRSgiPsODpQ+AD+5H8ERTIWpb0e4uWfR76eOc2G3 +NLdXvpU31ady05mVS1dCZsZyEytOkG2+AgfyjJsIfwxw+5PZCoV3xm90tb/43l+U +3YyKh/lmCfL8AM73ipXqSN1rQBf4gp4sXxJ0fbProsRQEIqveoyt8g== +-----END RSA PRIVATE KEY----- diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem new file mode 100644 index 0000000000000..f1134bb17ed71 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtBPRFC+8WpCauAyIr1uCTSK6qtAeevEW1vRkn/KkFQX27UWS +NgU/IukTbA091BDae7HEiWSp7IA1IDbu2q4IwY9UksjF8yFVNZYifr/IzS6lbHOI +ZRxuBzQOWgn0+7WNqzylXQ4y88yVVqSsdfiB8kJHi9o5r+M/3TBOrWCu75iYJeBV +w0nhMYIYOxB0RkPqB1+5z4cgLjyZYuC6iZe+9m718J4LRHTd60lg9wtg4H7RUE3u +VgjLSNpNyvVpOW2qHq+o21gdS7xBQ3pbD619vBWeNDkvCaBp6YZw4ENhUxeg4xaZ +nOrNEKZw4HQnzklDJe1a69InQI6F2b/26VEGgQIDAQABAoIBABKTzMkBV7QcIOoF +2QAGN74PbCR9Dffu8UVBtzPNC2Jj2CKIP9o01luaofdOsmczSebi4vytlt4gJ9rn +7+I9fAfD6pyt+8XmVW0OzQY4cNXCDyzOCm8r7Knvk990EYL6KuBUhFbCRT1jiLCE +koolFfrRHaJu4+6iSg9ekW9PfxyWfAxtEp4XrrqgN4jN3Lrx1rYCZnuYp3Lb+7WI +fJC/rK6MTphUMLbPMvmUwHjFzoe7g9MZxRRY3kY3h1n3Ju1ZbaCbP0Vi/+tdgKAl +290J2MStWWJfOoTNnnOSYhWIPQUiFtuUiab7tJ90GGb1DyLbOrr6wG2awJoqF9ZM +Qwvkf/UCgYEA5dsHhxuX+cTHF6m+aGuyB0pF/cnFccTtwWz2WwiH6tldnOAIPfSi +WJU33C988KwJFmAurcW43VVVs7fxYbC6ptQluEI+/L/3Mj/3WgwZaqX00cEPkzKA +M1XbvanQAU0nGfq+ja7sZVpdbBoBUb6Bh7HFyLM3LgliT0kMQeolKXMCgYEAyI9W +tEHnkHoPjAVSSobBOqCVpTp1WGb7XoxhahjjZcTOgxucna26mUyJeHQrOPp88PJo 
+xxdDJU410p/tZARtFBoAa++IK9qC6QLjN23CuwhD7y6RNZsRZg0kOCg9SLj+zVj5 +mrvZFf6663EpL82UZ2zUGl4L1sMhYkia0TMjYzsCgYAFHuAIDoFQOyYETO/E+8E3 +kFwGz1vqsOxrBrZmSMZeYQFI4WTNnImRV6Gq8hPieLKrIPFpRaJcq+4A1vQ1rO47 +kTZV6IPmtZAYOnyUMPjP+2p80cQ7D0Dz49HFY+cSYFmipodgOKljiKPUKLAm1guk +rj0tv3BXQjZCdeoj/cdeKQKBgF8u3+hWqs5/j2dVkzN5drUbR0oOT2iwHzZFC2pt ++2XuHFBOx2px6/AbSdbX0zeMccVsVlu+Z4iJ8LNQYTqpexciK/cNzCN75csuKqXA +ur1G8+7Mu++j84LqU7kvJ76exZaxVmygICv3I8DfiLt+JqNbG+KTpay8GNjrOkZ0 +raPHAoGAQ1p/Qvp7DHP2qOnUB/rItEVgWECD3uPx4NOCq7Zcx7mb9p7CI6nePT5y +heHpaJIqVyaS5/LHJDwvdB1nvtlgc9xKa5d1fWhLL3dwFCa98x5PDlN/JztH8DIt +tTlD+8NECIvI+ytbzLS0PZWBYctAR2rP2qlMCGdYerdjwl8S98E= +-----END RSA PRIVATE KEY----- diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem new file mode 100644 index 0000000000000..7fc26c3b2a535 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG4wIBAAKCAYEAv86yDTGazbq0uf6Xji4kdROCQaSmL+Ttse06BVXDiv7nf/13 +wJwt6tQzexw1GXQ5P2I0Mjh1NUr1tFXf3D99laJE209GEVWaQPEdoyRLdVibgH8I +rtt8IhEIHMif8bBXONPwAy4M1HP0AWMIG0WV29ZTgZGmHknI+Rkm1P0bGhod6YBI +sNNY59PjL0tWOf1M8Mj6OOcDS7wBgVMNZSA0MlkUf10Kes4fay/gqDrM2dEHi1lB +7VNQ6nWbSNi5qTPMfhi0bkc64l2fXQ9qQ1BFaFR6AV/EQHQUhFv+AJ62d5vxssi3 +4DgLVz9XVaGXyNHahdxtLGdBzbbZ9OKPXnSGZdfhBxvr3iKj/MDBFq97mzjXisOu +ZljFXx+I5VEiYjgdB2k+wFVOv9XNvwVQdO7p9lX9PJrV6pOKD5tJgh842j3134vA +PaANDaZBbYj+8HN4oDU9Dtvi7INO7G2yXsCiC1xWwczHBhKv+M2cip4eL8XH0OZj +hZi78mDSW1cY4x0xAgMBAAECggGALmijmh+jdh2ztsEMCIHPnmg+/wUIlNQOUxu0 +CzBqJMpyVvyMKhVf4s6/Og1kJ6mAZH6tZG1WprNhaeXRSWgvSbI+eNXgfTc6IHZ2 +lk+k1lq/HUMfZbeDfHakgNpmIMNrBzv4ebx9rDX2FxPQFVEr5kogYFxOVkvCDctx +It2u3gztqd65N9ebTlRtRrcywMsx/5yRNo1mtb9imdjvh8VX+8qj9AEeFQKkhXnL +IMlBYX57FdNd6T9cd65HCUQu/+qhJ7E/cRdoYTG0Hp9bc7pXM7b+6KqonUiMmi1k +SzNlEuYU3SEFKen1K0b8Bk+d/abUaWsqPDPA4jBQye5AdCIMYMazT/KNBGZkNBPu 
+abmlAut6MDVkwFsLDU5S4tpKqKUUeVmN7jte3BERICMQr3WD6cXd/AtdMrwWaBVm +HTq9Lf7GFUxIDVU1pyGGHkfeXjoK+Ur5DIwJEr6BfhEMC6LGLU3UeWcZOKm8nkc4 +8ue73s+D+mcK4BiI0kRTwXejcnM1AoHBAN9VGvoyvR6s6J5lF59b+B/qQhU4MNfD +vyJS7UxZke/JNWXPKlrij1bZgz2vgjsF+WaPYc1BVKjxRy1fciNlIqaEsjgnxm4M +cHUD8uPeQi8ARPwm+kbRr/IVbD7pFIyKurYraNt3kQQq9aE9fOSOxjbnY+GhGbBM +WnbZp0TH2oUbqHuKRmGUsFIS0tAhfObo4OebHOhiyIB+I7OZQKBU2qVIWyAinjFN +PXiZ9ZT78v0YHZmI29ADEFw0cV1+R04P2wKBwQDb3Rrw+YXPifJDdeaupSno6xJ1 +ACWc0Oof7LVRZC8JJw+eMtgbx9VCcQs811QHJ+q9gXRR3JC38J1JCYJaMcCpG++H +Spl3t2Rsd7dT3RxamYRnUtw6yFUTxYtEkbbViURxh5ghIMEuTyQ7vcV6SXNubGxp +Pc/TQboUZHpehSnJhNiFXsCHgjd7eu/q6cxMqaaqULIie+sfbB39lnbuZ53f1pLZ +7zb2E98m0DtjVstv+NYMoY7DG2DsJ8skw7QNiuMCgcEAhoYI/fRaHoQgimhyVjdb +uj2tGIMESLNMCizRa0/4q+sTEwQ6iww1MydJ+nohg9QRakmrq6tSh4DuUtJPOirN +OGhtwY2T5O3xP0rln4RdcVpEM253Cvl7deKZlTtoeU+HL/vt6WSYIV6PHlSfSj0G +AERY0avsgVk8lKJ+Mtv/MHZ8gg3EXzrlCkr0WRIS3jQgZOH2A7Sc+WkBsEj7uJfk +K/LtkOnJSAEyqdZzKw6oSvOfwL/DSATQcfnU33AVG1xJAoHAP7KYOpZgARe7G3ZO +Be0N7lAkgccwHnWcTvF6OoFm6yTo7nFWkP3dOCmeEttjmcsjxBF8TLc1KkVInD9o +B4+AfL3+MFkZi0iTjKVGdMKLk2gEpxanfVRK6baCubPpn4XsUWPyXC6sKSl+mtxJ +GTuseKJq6jahPlg9e+j8VuQHjj2xqTohV2EPh/O/DHT93nwMMC2+3iS+otTk/3IX +NMzKPW5iD0exyatKLGmJuycLO36BLRmlTbth4ilJPnaAdKf3AoHAANX+2JM3xxc0 +CchCT31ZEnkGRjDtQCZ+gR6BWeWF02HpEXk+scHthD5hUyBwrJkYMTT9wCLkerQl +AioOGJCZkkio/5NDkPmJ80EW/4OcQc/p/uUY5H3PEsLAJBKbb3q30FC0KRlvhg3h +HqyzAOuEjuBJcaCNsN/c+XYIQEbN4I51Zm04HVIDoCYeBs+XRd56foVOSUfW1Dm1 +jgbcxHBaZ1FYeBWoGutBj+BYGVNwD0XpltBVStlinaH48FWKCHiW +-----END RSA PRIVATE KEY----- diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem new file mode 100644 index 0000000000000..a7153f21ee2ae --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAwvYN7bZNnIbifmo4WlYqQPXoCXjLRUspAX8jkViYJH4XIXD6 
+lajlKOxvW27D9rIYjfNFShmfI8cf4IrXgrWUgV0TYBi8qm/Ppnr38R8wFy0z9fE6 +anF61vKdxjsW89NhT4QjvDNmOS6B6gK4JxSWo2MNpqxX79IOidc8T6mX8nxLYZek +fb3qXeL8lGp46cwRnwmD0eua8r+9hHU/W4q3LBjVCEarK/8Haj+Mfccn97QAiaeh +44/oALNRK4cUr2L3s0JuiXf3ihLi6yWXZ9LIklB8rvjWaK5IgYRlLE1Kwc/k1WCC +H7IQVLRnZAZxhs/pXN+9XQYgnNDn9Bczj8UMNmzVs3wQQ+TMjpNSSEvUTIs6gfNH +MwiPfbKXjAXKWSJBOfK77mtcUByT2iilW7lwUYRebl+Sh9jJSYzhOdiIrBgtUTtG +C0bEhBHKNYt13C5NRLU2UpxxqDKdlV4iz7eDZONuasY2sU9krKNh631kyHe66EBx +B2IIA48apavlyzHP6TyTw8PM00oL5UOmCsd1oZi1kaJEH0yLdWMvfU/d0mLMlTUF +3bHsoe6zwn8ZhDQ4X781cxxt04j87bNNcDetJ2+4R/GsxyRozetJ3sf1DWYxeux0 ++WbZwCbuFkJPqiLMcSNU7Mzzpqwsz6muRXexZrR0T+Mhfi4mSoKmhwgcRrkCAwEA +AQKCAgEAitUHFCkG2Zkf/t+LI6FKU5oJU31mWwDrwXoVDPKm3Q3BUPoQJ7TvAxZj +MhwNhkZKwALla6AVODgbVh3o29aMWxa5rmvPJhubJjVZDKal60swPko6zAPlct2w +RrJbZOQ43pP9ko6Tk9KvhsHK+2Fo6A6ocPE3absOyU+xThU5895ZG7UN2ND97T+v +l8y+L9c3ESaIbVs2qvipb4LCzGBakhdY1JzmVd01HY8Fb6f7qSPQYjYyTUdz8dO/ +0JxPuZljRI7hAhmg9/z9BHBvgeqTE8YnE751iHPbhAeQuFyOCLgPf3dLEmENyhPx +sEP6YgL6bj2RroWngaQuGR7Qg+fRCkxBfBZGgkZd+WbN6Yv0EK3ktDkCcmi6urx3 +6kOb4BWyRbwZCUMF4Jzo0aQYZSZQuHcl6QZbX6AhCSzPWjMyb7Q5GApbwRNRBcjI +iu8Ts+kXMiWtF7+Bd78JTnr6PPGMiPvjlZCGYTmvzq5/EJmIFvB8XlV3iskb29+w +9Fa/In+/mXuFKPM6Vpc6+FxPzm1P2cP68/jqB3mg3unwx0ecXxjkHBJKcPwNCTaz +nfPxjV+ep9VwyKViJ6SX3OPeMbsNQl6Zr4oCF+qvVZMGsrmd9fpE5NrKA7I+zYBE +6pyGod7cZRiBU41uVO44cFiSTURUPHilwFaTVwb4p2BAHfVKWJUCggEBAPu57609 +SQZ8O4VpiHPDOa0inZJvzZ+0lAo6NOMyhTvY9CPn+YjfYNfol525W+T7JnrvWeic +FIin9jHxnpXK0B/AftUJTw4rEpCio5f79xpFAES0vw+241+4XOI7c9LCOC+FWBiT +4DVfYoob31vyK9+7pRSZ+SiMiu3vAY7seUSIAL0SvpvSbDyzZ+2eVZWBEgW6LVrz +tDE8oNqQUMXA2/FchZmv9ysATBmsTJBo9POVkKNTyUMQzxtslvC6v4oNr7K3OIHb +gcf4TzWXOzn9tG+4xYT9pWW5msGawiUZRcTaAna+E/OgpKWnEnZgCC4hboRLxPLX +53FmOM6M2G7thzsCggEBAMZFZyj++yu5Efce9ccQX6LyfHrqXjNkrU+u9Ed1v5WV +N9ACMmXrHJh5IYa5rhfMM4jWMFWyYbfsCzpfGn4S+5y/fzoFLoryPyH9ygDePA0Q +sRF/PLveyOyVWvP0QtqMjivV40zHpySlUdTfH6DMpEJcutN4Zb28++HVcrwhoC1k +6JAv6lMi0JVZPOQ9PdvWfqWiUFyWL4D2UzzGunCyDZRKhNFFZK8ZMwGrq1GwKbMY 
+UjR1V446+XmVy1UmJBcsfv8N1GNAmS4JZnIYAA3F79gSaPMouBiGEoq9STZNMV8i +yWX+q2+xF5on0HLiQQDF+CDf7TKQdn9nIb5T9UxH0psCggEBAIXdp5wVT1RnlK+q +I6qMU4vDMNDDLDKWMXQO6Dk6kKf3BD5kCsvDHxky1A5ImJn5Bcmyp7mP50uVJtQA +jjlSlcOM9uYMAUKjnUfVdfJJSGtr7mybQk/1Do3E2YBl5X5bUs3St7q6SS2ZACo6 +EFoxOyvL+kouZ9Ysh2VpCQccspDDUsE5yqvLB0xwjABNh23uKctp6tzHZTgZ6eDR +hmLj4RNGBLZqYaM4kT/F5SGW70zqaPSyhgFoWvtWkB6M8XdVN+5uiVplhWr2ngj9 +171LMj2HoWLMEL08KxRyXHSmL/gzh8Pl6W5SK7z8UFErWK5PkRpBwQbz0lz345XU +SPUU1CkCggEANl19Zd5UnK6IYIxatBSnRJTc0Q9GeS0X4IuGa7m7qJZb0Wtoyk1n +b9928+wh8mCJmPGyGy1oYfgVNBIzv3IRD+/qQMx+8JZv/TALnaVSLfrvsm6DBriV +u1lQsSe+1RJJQXfKdZkhGYlDIO11TLAQMiofCDExJI+XOVs/tJo7w3ax1idtRxJ6 +wxjO+35XYdo0q9IP2vNLXz8rn7SWFqSb2Jd+R6uRafms5L5foWx+Yp8+mye5tEQz +d8U9FcH5j2kiAWmXXQeveNIXBx3RlfU6SLLzmAkIQaExoF06n+sWqJFvS8pC08Np +R1m3bGjhjga6fLkp3v2BoHl05S08S4g+FQKCAQARIy6gUizXPEv8x1Pk70LzHPDQ +y8Og3OozpYd1/AyY2TDPYdRhg4zk+R5JIYLQfsitdH5uzCO+UFwsKwhv76TIqN+D +ht+vEoJ0UXrVG8MQe8VnV7+vdQtYrpYMi/SqnXFQuzaqW3LtWVEjXQFOosNGEtNV +VupzeqsYa5GhhzagCTLMxkgAmyo7/GVa5h4dreiv3hq1xOwbmD7Y6J29NGJwlQPY +7nw4rBtXxbMxThVgktLHuKLBapr2Gulg4tDTj4e1VoM8u9Y34imc5NevoQ32hfvr +cZErq0WhWXvqvmbwRyyf1GRizFSdJ71TxT7EG71IKIMzcapod9UXtXXf/IWF +-----END RSA PRIVATE KEY----- diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem new file mode 100644 index 0000000000000..250404e4b7617 --- /dev/null +++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem @@ -0,0 +1,9 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIBOgIBAAJBAJ5UTbiBg1SYFhLgXPawBf+joDBSVqsWQ4SS2lCt+pLSkit19LK9 +73f5ZYBhZFsszPzPn7bFEGVwsRiegtf5FccCAwEAAQJABykg6rtQyT6noOrsyWtk +mg84a3cN3GcBXrmVjhiQQJubMkUNFE+l0JPNb8kzwzhcm8kzuO92CkqFSLXyQyDP +IQIhAMxkU+gZ5HIStWr0yPMOVoikNjcTW9PGtcUu1zucodilAiEAxk6KGDnHr9Dq +U150jT9aQTlZiZ6lF9kRU/gCcaIHPPsCIQC/6fRfGvDFq4tswittDSlzY70EOckf +MJW8cB7oekn9gQIgGVRg2TVQJ0nlFF8FPiFwctJTeHuWFNS6HOKZ1U/f4s8CIBAA +Kw3ZPC6zxfQNnkI+c96cbTQhQrVPGPlx5fC3l0Ru 
+-----END RSA PRIVATE KEY----- diff --git a/contrib/exe/BUILD b/contrib/exe/BUILD index b70a786989d7a..1cf08ac421632 100644 --- a/contrib/exe/BUILD +++ b/contrib/exe/BUILD @@ -6,6 +6,8 @@ load( ) load( "//contrib:all_contrib_extensions.bzl", + "ARM64_SKIP_CONTRIB_TARGETS", + "PPC_SKIP_CONTRIB_TARGETS", "envoy_all_contrib_extensions", ) @@ -37,5 +39,9 @@ envoy_cc_test( }, deps = [ "//test/config_test:example_configs_test_lib", - ] + envoy_all_contrib_extensions(), + ] + select({ + "//bazel:linux_aarch64": envoy_all_contrib_extensions(ARM64_SKIP_CONTRIB_TARGETS), + "//bazel:linux_ppc": envoy_all_contrib_extensions(PPC_SKIP_CONTRIB_TARGETS), + "//conditions:default": envoy_all_contrib_extensions(), + }), ) diff --git a/contrib/extensions_metadata.yaml b/contrib/extensions_metadata.yaml index 8614d2dbddb83..215a7936f0604 100644 --- a/contrib/extensions_metadata.yaml +++ b/contrib/extensions_metadata.yaml @@ -33,4 +33,18 @@ envoy.filters.network.postgres_proxy: - envoy.filters.network security_posture: requires_trusted_downstream_and_upstream status: stable - +envoy.filters.network.sip_proxy: + categories: + - envoy.filters.network + security_posture: requires_trusted_downstream_and_upstream + status: alpha +envoy.filters.sip.router: + categories: + - envoy.sip_proxy.filters + security_posture: requires_trusted_downstream_and_upstream + status: alpha +envoy.tls.key_providers.cryptomb: + categories: + - envoy.tls.key_providers + security_posture: robust_to_untrusted_downstream + status: alpha diff --git a/contrib/kafka/filters/network/source/kafka_response.h b/contrib/kafka/filters/network/source/kafka_response.h index 32bc8317f5131..f135f5cacb744 100644 --- a/contrib/kafka/filters/network/source/kafka_response.h +++ b/contrib/kafka/filters/network/source/kafka_response.h @@ -13,7 +13,7 @@ namespace Kafka { * Decides if response with given api key & version should have tagged fields in header. 
* Bear in mind, that ApiVersions responses DO NOT contain tagged fields in header (despite having * flexible versions) as per - * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24 + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24 * This method gets implemented in generated code through 'kafka_response_resolver_cc.j2'. * * @param api_key Kafka request key. diff --git a/contrib/kafka/filters/network/source/kafka_types.h b/contrib/kafka/filters/network/source/kafka_types.h index 3240b9a9c2d6c..d01c304984e4c 100644 --- a/contrib/kafka/filters/network/source/kafka_types.h +++ b/contrib/kafka/filters/network/source/kafka_types.h @@ -31,6 +31,20 @@ using NullableBytes = absl::optional; */ template using NullableArray = absl::optional>; +/** + * Analogous to: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/Uuid.java#L28 + */ +struct Uuid { + + const int64_t msb_; + const int64_t lsb_; + + Uuid(const int64_t msb, const int64_t lsb) : msb_{msb}, lsb_{lsb} {}; + + bool operator==(const Uuid& rhs) const { return msb_ == rhs.msb_ && lsb_ == rhs.lsb_; }; +}; + } // namespace Kafka } // namespace NetworkFilters } // namespace Extensions diff --git a/contrib/kafka/filters/network/source/mesh/BUILD b/contrib/kafka/filters/network/source/mesh/BUILD index f457afee713ea..4d1481f89fcd8 100644 --- a/contrib/kafka/filters/network/source/mesh/BUILD +++ b/contrib/kafka/filters/network/source/mesh/BUILD @@ -109,6 +109,18 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "outbound_record_lib", + srcs = [ + ], + hdrs = [ + "outbound_record.h", + ], + tags = ["skip_on_windows"], + deps = [ + ], +) + envoy_cc_library( name = "upstream_kafka_client_lib", srcs = [ @@ -118,6 +130,7 @@ envoy_cc_library( ], tags = ["skip_on_windows"], deps = [ + ":outbound_record_lib", ], ) @@ -131,6 +144,7 @@ envoy_cc_library( ], tags = 
["skip_on_windows"], deps = [ + ":outbound_record_lib", ":upstream_kafka_client_lib", "//envoy/event:dispatcher_interface", "//source/common/common:minimal_logger_lib", diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD b/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD index 6891a3c3ea574..3a1d58d6320a6 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD @@ -20,28 +20,16 @@ envoy_cc_library( ], tags = ["skip_on_windows"], deps = [ - ":produce_outbound_record_lib", ":produce_record_extractor_lib", "//contrib/kafka/filters/network/source:kafka_request_parser_lib", "//contrib/kafka/filters/network/source:kafka_response_parser_lib", "//contrib/kafka/filters/network/source/mesh:abstract_command_lib", + "//contrib/kafka/filters/network/source/mesh:outbound_record_lib", "//contrib/kafka/filters/network/source/mesh:upstream_kafka_facade_lib", "//source/common/common:minimal_logger_lib", ], ) -envoy_cc_library( - name = "produce_outbound_record_lib", - srcs = [ - ], - hdrs = [ - "produce_outbound_record.h", - ], - tags = ["skip_on_windows"], - deps = [ - ], -) - envoy_cc_library( name = "produce_record_extractor_lib", srcs = [ @@ -52,8 +40,8 @@ envoy_cc_library( ], tags = ["skip_on_windows"], deps = [ - ":produce_outbound_record_lib", "//contrib/kafka/filters/network/source:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source/mesh:outbound_record_lib", ], ) diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc index 1fa8cfa8f5b82..31cb53f12a402 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc @@ -38,10 +38,10 @@ AbstractResponseSharedPtr ApiVersionsRequestHolder::computeAnswer() const { 
request_header_.correlation_id_}; const int16_t error_code = 0; - const ApiVersionsResponseKey produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED, - MAX_PRODUCE_SUPPORTED}; - const ApiVersionsResponseKey metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED, - MAX_METADATA_SUPPORTED}; + const ApiVersion produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED, + MAX_PRODUCE_SUPPORTED}; + const ApiVersion metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED, + MAX_METADATA_SUPPORTED}; const ApiVersionsResponse real_response = {error_code, {produce_entry, metadata_entry}}; return std::make_shared>(metadata, real_response); diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc index 05b63b451d1fa..07f402a80802e 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc @@ -33,8 +33,14 @@ AbstractResponseSharedPtr MetadataRequestHolder::computeAnswer() const { advertised_address.second}; std::vector response_topics; if (request_->data_.topics_) { - for (const auto& topic : *(request_->data_.topics_)) { - const std::string& topic_name = topic.name_; + for (const MetadataRequestTopic& topic : *(request_->data_.topics_)) { + if (!topic.name_) { + // The client sent request without topic name (UUID was sent instead). + // We do not know how to handle it, so do not send any metadata. + // This will cause failures in clients downstream. 
+ continue; + } + const std::string& topic_name = *(topic.name_); std::vector topic_partitions; const absl::optional cluster_config = configuration_.computeClusterConfigForTopic(topic_name); diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc index e2ed06fdbb17e..b94b1257c687d 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc @@ -20,19 +20,18 @@ ProduceRequestHolder::ProduceRequestHolder(AbstractRequestListener& filter, const RecordExtractor& record_extractor, const std::shared_ptr> request) : BaseInFlightRequest{filter}, kafka_facade_{kafka_facade}, request_{request} { - outbound_records_ = record_extractor.extractRecords(request_->data_.topics_); + outbound_records_ = record_extractor.extractRecords(request_->data_.topic_data_); expected_responses_ = outbound_records_.size(); } void ProduceRequestHolder::startProcessing() { // Main part of the proxy: for each outbound record we get the appropriate sink (effectively a // facade for upstream Kafka cluster), and send the record to it. - for (const auto& outbound_record : outbound_records_) { + for (const OutboundRecord& outbound_record : outbound_records_) { KafkaProducer& producer = kafka_facade_.getProducerForTopic(outbound_record.topic_); // We need to provide our object as first argument, as we will want to be notified when the // delivery finishes. 
- producer.send(shared_from_this(), outbound_record.topic_, outbound_record.partition_, - outbound_record.key_, outbound_record.value_); + producer.send(shared_from_this(), outbound_record); } // Corner case handling: // If we ever receive produce request without records, we need to notify the filter we are ready, diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h index 04781366ea90f..d277b6c4bde7b 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h @@ -2,8 +2,8 @@ #include "contrib/kafka/filters/network/source/external/requests.h" #include "contrib/kafka/filters/network/source/mesh/abstract_command.h" -#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h" #include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h" +#include "contrib/kafka/filters/network/source/mesh/outbound_record.h" #include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h" #include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc index 3c98dc4885cf9..f7330021fd164 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc @@ -10,11 +10,11 @@ std::vector RecordExtractorImpl::extractRecords(const std::vector& data) const { std::vector result; for (const auto& topic_data : data) { - for (const auto& partition_data : topic_data.partitions_) { + for (const auto& partition_data : topic_data.partition_data_) { // Kafka protocol allows nullable data. 
if (partition_data.records_) { - const auto topic_result = extractPartitionRecords( - topic_data.name_, partition_data.partition_index_, *(partition_data.records_)); + const auto topic_result = extractPartitionRecords(topic_data.name_, partition_data.index_, + *(partition_data.records_)); std::copy(topic_result.begin(), topic_result.end(), std::back_inserter(result)); } } @@ -152,15 +152,17 @@ OutboundRecord RecordExtractorImpl::extractRecord(const std::string& topic, cons throw EnvoyException(fmt::format("invalid header count in record for [{}-{}]: {}", topic, partition, headers_count)); } + std::vector
headers; + headers.reserve(headers_count); for (int32_t i = 0; i < headers_count; ++i) { - // For now, we ignore headers. - extractByteArray(data); // Header key. - extractByteArray(data); // Header value. + const absl::string_view header_key = extractByteArray(data); + const absl::string_view header_value = extractByteArray(data); + headers.emplace_back(header_key, header_value); } if (data == expected_end_of_record) { // We have consumed everything nicely. - return OutboundRecord{topic, partition, key, value}; + return OutboundRecord{topic, partition, key, value, headers}; } else { // Bad data - there are bytes left. throw EnvoyException(fmt::format("data left after consuming record for [{}-{}]: {}", topic, diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h index 59c6e7380e4fa..f17b79dd54dd9 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h @@ -1,7 +1,7 @@ #pragma once #include "contrib/kafka/filters/network/source/external/requests.h" -#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h" +#include "contrib/kafka/filters/network/source/mesh/outbound_record.h" namespace Envoy { namespace Extensions { diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h b/contrib/kafka/filters/network/source/mesh/outbound_record.h similarity index 73% rename from contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h rename to contrib/kafka/filters/network/source/mesh/outbound_record.h index 4174e3dea7e13..a56baa5d2fb5e 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h +++ b/contrib/kafka/filters/network/source/mesh/outbound_record.h @@ -1,6 +1,8 
@@ #pragma once #include +#include +#include #include "absl/strings/string_view.h" @@ -10,6 +12,9 @@ namespace NetworkFilters { namespace Kafka { namespace Mesh { +// Kafka header. +using Header = std::pair; + // Binds a single inbound record from Kafka client with its delivery information. struct OutboundRecord { @@ -18,15 +23,16 @@ struct OutboundRecord { const int32_t partition_; const absl::string_view key_; const absl::string_view value_; + const std::vector
headers_; // These fields will get updated when delivery to upstream Kafka cluster finishes. int16_t error_code_; uint32_t saved_offset_; OutboundRecord(const std::string& topic, const int32_t partition, const absl::string_view key, - const absl::string_view value) - : topic_{topic}, partition_{partition}, key_{key}, value_{value}, error_code_{0}, - saved_offset_{0} {}; + const absl::string_view value, const std::vector
& headers) + : topic_{topic}, partition_{partition}, key_{key}, value_{value}, headers_{headers}, + error_code_{0}, saved_offset_{0} {}; }; } // namespace Mesh diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h index 24e9b36efdc65..f034f7da4f421 100644 --- a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h @@ -2,10 +2,12 @@ #include #include +#include #include "envoy/common/pure.h" #include "absl/strings/string_view.h" +#include "contrib/kafka/filters/network/source/mesh/outbound_record.h" namespace Envoy { namespace Extensions { @@ -57,15 +59,9 @@ class KafkaProducer { * (error code, offset). * * @param origin origin of payload to be notified when delivery finishes. - * @param topic Kafka topic. - * @param partition Kafka partition (as clients do partitioning, we just reuse what downstream - * gave us). - * @param key Kafka message key. - * @param value Kafka message value. + * @param record record data to be sent. */ - virtual void send(const ProduceFinishCbSharedPtr origin, const std::string& topic, - const int32_t partition, const absl::string_view key, - const absl::string_view value) PURE; + virtual void send(const ProduceFinishCbSharedPtr origin, const OutboundRecord& record) PURE; // Impl leakage: real implementations of Kafka Producer need to stop a monitoring thread, then // they can close the producer. 
Because the polling thread should not be interrupted, we just mark diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc index fd43b61a2cf2b..4d26ad99815d8 100644 --- a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc @@ -27,18 +27,46 @@ class LibRdKafkaUtilsImpl : public LibRdKafkaUtils { std::string& errstr) const override { return std::unique_ptr(RdKafka::Producer::create(conf, errstr)); } + + // LibRdKafkaUtils + RdKafka::Headers* convertHeaders( + const std::vector>& headers) const override { + RdKafka::Headers* result = RdKafka::Headers::create(); + for (const auto& header : headers) { + const RdKafka::Headers::Header librdkafka_header = { + std::string(header.first), header.second.data(), header.second.length()}; + const auto ec = result->add(librdkafka_header); + // This should never happen ('add' in 1.7.0 does not return any other error codes). 
+ if (RdKafka::ERR_NO_ERROR != ec) { + delete result; + return nullptr; + } + } + return result; + } + + // LibRdKafkaUtils + void deleteHeaders(RdKafka::Headers* librdkafka_headers) const override { + delete librdkafka_headers; + } + +public: + static const LibRdKafkaUtils& getDefaultInstance() { + CONSTRUCT_ON_FIRST_USE(LibRdKafkaUtilsImpl); + } }; RichKafkaProducer::RichKafkaProducer(Event::Dispatcher& dispatcher, Thread::ThreadFactory& thread_factory, const RawKafkaProducerConfig& configuration) - : RichKafkaProducer(dispatcher, thread_factory, configuration, LibRdKafkaUtilsImpl{}){}; + : RichKafkaProducer(dispatcher, thread_factory, configuration, + LibRdKafkaUtilsImpl::getDefaultInstance()){}; RichKafkaProducer::RichKafkaProducer(Event::Dispatcher& dispatcher, Thread::ThreadFactory& thread_factory, const RawKafkaProducerConfig& configuration, const LibRdKafkaUtils& utils) - : dispatcher_{dispatcher} { + : dispatcher_{dispatcher}, utils_{utils} { // Create producer configuration object. std::unique_ptr conf = @@ -79,17 +107,29 @@ RichKafkaProducer::~RichKafkaProducer() { void RichKafkaProducer::markFinished() { poller_thread_active_ = false; } -void RichKafkaProducer::send(const ProduceFinishCbSharedPtr origin, const std::string& topic, - const int32_t partition, const absl::string_view key, - const absl::string_view value) { +void RichKafkaProducer::send(const ProduceFinishCbSharedPtr origin, const OutboundRecord& record) { { - void* value_data = const_cast(value.data()); // Needed for Kafka API. + void* value_data = const_cast(record.value_.data()); // Needed for Kafka API. // Data is a pointer into request internals, and it is going to be managed by // ProduceRequestHolder lifecycle. So we are not going to use any of librdkafka's memory // management. 
const int flags = 0; - const RdKafka::ErrorCode ec = producer_->produce( - topic, partition, flags, value_data, value.size(), key.data(), key.size(), 0, nullptr); + const int64_t timestamp = 0; + + RdKafka::ErrorCode ec; + // librdkafka requires a raw pointer and deletes it on success. + RdKafka::Headers* librdkafka_headers = utils_.convertHeaders(record.headers_); + if (nullptr != librdkafka_headers) { + ec = producer_->produce(record.topic_, record.partition_, flags, value_data, + record.value_.size(), record.key_.data(), record.key_.size(), + timestamp, librdkafka_headers, nullptr); + } else { + // Headers could not be converted (this should never happen). + ENVOY_LOG(trace, "Header conversion failed while sending to [{}/{}]", record.topic_, + record.partition_); + ec = RdKafka::ERR_UNKNOWN; + } + if (RdKafka::ERR_NO_ERROR == ec) { // We have succeeded with submitting data to producer, so we register a callback. unfinished_produce_requests_.push_back(origin); @@ -97,7 +137,12 @@ void RichKafkaProducer::send(const ProduceFinishCbSharedPtr origin, const std::s // We could not submit data to producer. // Let's treat that as a normal failure (Envoy is a broker after all) and propagate // downstream. - ENVOY_LOG(trace, "Produce failure: {}, while sending to [{}/{}]", ec, topic, partition); + ENVOY_LOG(trace, "Produce failure: {}, while sending to [{}/{}]", ec, record.topic_, + record.partition_); + if (nullptr != librdkafka_headers) { + // Kafka headers need to be deleted manually if produce call fails. 
+ utils_.deleteHeaders(librdkafka_headers); + } const DeliveryMemento memento = {value_data, ec, 0}; origin->accept(memento); } diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h index 82b3e549c2be3..a73f394cdca10 100644 --- a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h @@ -30,6 +30,13 @@ class LibRdKafkaUtils { virtual std::unique_ptr createProducer(RdKafka::Conf* conf, std::string& errstr) const PURE; + + // Returned type is a raw pointer, as librdkafka does the deletion on successful produce call. + virtual RdKafka::Headers* convertHeaders( + const std::vector>& headers) const PURE; + + // In case of produce failures, we need to dispose of headers manually. + virtual void deleteHeaders(RdKafka::Headers* librdkafka_headers) const PURE; }; using RawKafkaProducerConfig = std::map; @@ -60,9 +67,7 @@ class RichKafkaProducer : public KafkaProducer, void markFinished() override; // KafkaProducer - void send(const ProduceFinishCbSharedPtr origin, const std::string& topic, - const int32_t partition, const absl::string_view key, - const absl::string_view value) override; + void send(const ProduceFinishCbSharedPtr origin, const OutboundRecord& record) override; // This method gets executed by monitoring thread. // Does not finish until this object gets 'markFinished' invoked or gets destroyed. @@ -93,6 +98,9 @@ class RichKafkaProducer : public KafkaProducer, // Monitoring thread that's responsible for continuously polling for new Kafka producer events. Thread::ThreadPtr poller_thread_; + + // Abstracts out pure Kafka operations. 
+ const LibRdKafkaUtils& utils_; }; using RichKafkaProducerPtr = std::unique_ptr; diff --git a/contrib/kafka/filters/network/source/protocol/generator.py b/contrib/kafka/filters/network/source/protocol/generator.py index 846dd2aa2d9b7..2fd18ebc2d69b 100755 --- a/contrib/kafka/filters/network/source/protocol/generator.py +++ b/contrib/kafka/filters/network/source/protocol/generator.py @@ -15,7 +15,7 @@ def generate_main_code(type, main_header_file, resolver_cc_file, metrics_header_ - resolver_cc_file - contains request api key & version mapping to deserializer (from header file) - metrics_header_file - contains metrics with names corresponding to messages """ - processor = StatefulProcessor() + processor = StatefulProcessor(type) # Parse provided input files. messages = processor.parse_messages(input_files) @@ -66,7 +66,7 @@ def generate_test_code( - codec_test_cc_file - tests involving codec and Request/ResponseParserResolver, - utilities_cc_file - utilities for creating sample messages. """ - processor = StatefulProcessor() + processor = StatefulProcessor(type) # Parse provided input files. messages = processor.parse_messages(input_files) @@ -97,7 +97,8 @@ class StatefulProcessor: AlterConfigsResource, what would cause a compile-time error if we were to handle it trivially). """ - def __init__(self): + def __init__(self, type): + self.type = type # Complex types that have been encountered during processing. self.known_types = set() # Name of parent message type that's being processed right now. @@ -107,8 +108,8 @@ def __init__(self): def parse_messages(self, input_files): """ - Parse request/response structures from provided input files. - """ + Parse request/response structures from provided input files. 
+ """ import re import json @@ -123,9 +124,18 @@ def parse_messages(self, input_files): without_comments = re.sub(r'\s*//.*\n', '\n', raw_contents) without_empty_newlines = re.sub( r'^\s*$', '', without_comments, flags=re.MULTILINE) - message_spec = json.loads(without_empty_newlines) - message = self.parse_top_level_element(message_spec) - messages.append(message) + # Windows support: see PR 10542 for details. + amended = re.sub(r'-2147483648', 'INT32_MIN', without_empty_newlines) + # Kafka JSON files are malformed. See KAFKA-12794. + if input_file == 'external/kafka_source/DescribeProducersRequest.json': + amended = amended[:-6] + message_spec = json.loads(amended) + # Adopt publicly available messages only: + # https://kafka.apache.org/28/protocol.html#protocol_api_keys + api_key = message_spec['apiKey'] + if api_key <= 51 or api_key in [56, 57, 60, 61]: + message = self.parse_top_level_element(message_spec) + messages.append(message) except Exception as e: print('could not process %s' % input_file) raise @@ -195,7 +205,9 @@ def parse_complex_type(self, type_name, field_spec, versions): child = self.parse_field(child_field, versions[-1]) if child is not None: fields.append(child) - + # Some structures share the same name, use request/response as prefix. + if type_name in ['EntityData', 'EntryData', 'PartitionData', 'TopicData']: + type_name = self.type.capitalize() + type_name # Some of the types repeat multiple times (e.g. AlterableConfig). # In such a case, every second or later occurrence of the same name is going to be prefixed # with parent type, e.g. 
we have AlterableConfig (for AlterConfigsRequest) and then @@ -379,7 +391,7 @@ def default_value(self): return str(self.type.default_value()) def example_value_for_test(self, version): - if self.is_nullable(): + if self.is_nullable_in_version(version): return 'absl::make_optional<%s>(%s)' % ( self.type.name, self.type.example_value_for_test(version)) else: @@ -470,7 +482,10 @@ class Primitive(TypeSpecification): Represents a Kafka primitive value. """ - USABLE_PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes'] + USABLE_PRIMITIVE_TYPE_NAMES = [ + 'bool', 'int8', 'int16', 'int32', 'int64', 'uint16', 'float64', 'string', 'bytes', + 'records', 'uuid' + ] KAFKA_TYPE_TO_ENVOY_TYPE = { 'string': 'std::string', @@ -479,7 +494,11 @@ class Primitive(TypeSpecification): 'int16': 'int16_t', 'int32': 'int32_t', 'int64': 'int64_t', + 'uint16': 'uint16_t', + 'float64': 'double', 'bytes': 'Bytes', + 'records': 'Bytes', + 'uuid': 'Uuid', 'tagged_fields': 'TaggedFields', } @@ -490,13 +509,18 @@ class Primitive(TypeSpecification): 'int16': 'Int16Deserializer', 'int32': 'Int32Deserializer', 'int64': 'Int64Deserializer', + 'uint16': 'UInt16Deserializer', + 'float64': 'Float64Deserializer', 'bytes': 'BytesDeserializer', + 'records': 'BytesDeserializer', + 'uuid': 'UuidDeserializer', 'tagged_fields': 'TaggedFieldsDeserializer', } KAFKA_TYPE_TO_COMPACT_DESERIALIZER = { 'string': 'CompactStringDeserializer', - 'bytes': 'CompactBytesDeserializer' + 'bytes': 'CompactBytesDeserializer', + 'records': 'CompactBytesDeserializer' } # See https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message#deserializing-messages @@ -508,6 +532,7 @@ class Primitive(TypeSpecification): 'int32': '0', 'int64': '0', 'bytes': '{}', + 'uuid': 'Uuid{0, 0}', 'tagged_fields': 'TaggedFields({})', } @@ -525,8 +550,14 @@ class Primitive(TypeSpecification): 'static_cast(32)', 'int64': 'static_cast(64)', + 'float64': + 'static_cast(13.125)', 'bytes': 'Bytes({0, 
1, 2, 3})', + 'records': + 'Bytes({0, 1, 2, 3})', + 'uuid': + 'Uuid{13, 42}', 'tagged_fields': 'TaggedFields{std::vector{{10, Bytes({1, 2, 3})}, {20, Bytes({4, 5, 6})}}}', } @@ -561,7 +592,7 @@ def default_value(self): return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE) def has_flexible_handling(self): - return self.original_name in ['string', 'bytes', 'tagged_fields'] + return self.original_name in ['string', 'bytes', 'records', 'tagged_fields'] def example_value_for_test(self, version): return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST) diff --git a/contrib/kafka/filters/network/source/serialization.cc b/contrib/kafka/filters/network/source/serialization.cc index fc8464f7aac0d..b78085fdbf18e 100644 --- a/contrib/kafka/filters/network/source/serialization.cc +++ b/contrib/kafka/filters/network/source/serialization.cc @@ -205,6 +205,21 @@ uint32_t CompactBytesDeserializer::feed(absl::string_view& data) { false); } +uint32_t NullableCompactBytesDeserializer::feed(absl::string_view& data) { + return feedCompactBytesIntoBuffers(data, length_buf_, length_consumed_, required_, + data_buf_, ready_, NULL_COMPACT_BYTES_LENGTH, + true); +} + +NullableBytes NullableCompactBytesDeserializer::get() const { + const uint32_t original_data_len = length_buf_.get(); + if (NULL_COMPACT_BYTES_LENGTH == original_data_len) { + return absl::nullopt; + } else { + return absl::make_optional(data_buf_); + } +} + } // namespace Kafka } // namespace NetworkFilters } // namespace Extensions diff --git a/contrib/kafka/filters/network/source/serialization.h b/contrib/kafka/filters/network/source/serialization.h index 3401199c002f8..7f77de45b4687 100644 --- a/contrib/kafka/filters/network/source/serialization.h +++ b/contrib/kafka/filters/network/source/serialization.h @@ -65,7 +65,7 @@ template class Deserializer { * Generic integer deserializer (uses array of sizeof(T) bytes). 
* After all bytes are filled in, the value is converted from network byte-order and returned. */ -template class IntDeserializer : public Deserializer { +template class FixedSizeDeserializer : public Deserializer { public: uint32_t feed(absl::string_view& data) override { const uint32_t available = std::min(sizeof(buf_) - written_, data.size()); @@ -92,7 +92,7 @@ template class IntDeserializer : public Deserializer { /** * Integer deserializer for int8_t. */ -class Int8Deserializer : public IntDeserializer { +class Int8Deserializer : public FixedSizeDeserializer { public: int8_t get() const override { int8_t result = buf_[0]; @@ -103,7 +103,7 @@ class Int8Deserializer : public IntDeserializer { /** * Integer deserializer for int16_t. */ -class Int16Deserializer : public IntDeserializer { +class Int16Deserializer : public FixedSizeDeserializer { public: int16_t get() const override { int16_t result; @@ -112,10 +112,22 @@ class Int16Deserializer : public IntDeserializer { } }; +/** + * Integer deserializer for uint16_t. + */ +class UInt16Deserializer : public FixedSizeDeserializer { +public: + uint16_t get() const override { + uint16_t result; + safeMemcpyUnsafeSrc(&result, buf_); + return be16toh(result); + } +}; + /** * Integer deserializer for int32_t. */ -class Int32Deserializer : public IntDeserializer { +class Int32Deserializer : public FixedSizeDeserializer { public: int32_t get() const override { int32_t result; @@ -127,7 +139,7 @@ class Int32Deserializer : public IntDeserializer { /** * Integer deserializer for uint32_t. */ -class UInt32Deserializer : public IntDeserializer { +class UInt32Deserializer : public FixedSizeDeserializer { public: uint32_t get() const override { uint32_t result; @@ -139,7 +151,7 @@ class UInt32Deserializer : public IntDeserializer { /** * Integer deserializer for uint64_t. 
*/ -class Int64Deserializer : public IntDeserializer { +class Int64Deserializer : public FixedSizeDeserializer { public: int64_t get() const override { int64_t result; @@ -148,12 +160,34 @@ class Int64Deserializer : public IntDeserializer { } }; +/** + * Deserializer for Kafka Float64 type. + * Reference: https://kafka.apache.org/28/protocol.html#protocol_types + * Represents a double-precision 64-bit format IEEE 754 value. The values are encoded using eight + * bytes in network byte order (big-endian). + */ +class Float64Deserializer : public FixedSizeDeserializer { + + static_assert(sizeof(double) == sizeof(uint64_t), "sizeof(double) != sizeof(uint64_t)"); + static_assert(std::numeric_limits::is_iec559, "non-IEC559 (IEEE 754) double"); + +public: + double get() const override { + uint64_t in_network_order; + safeMemcpyUnsafeSrc(&in_network_order, buf_); + uint64_t in_host_order = be64toh(in_network_order); + double result; + safeMemcpy(&result, &in_host_order); + return result; + } +}; + /** * Deserializer for boolean values. * Uses a single int8 deserializer, and checks whether the results equals 0. * When reading a boolean value, any non-zero value is considered true. - * Impl note: could have been a subclass of IntDeserializer with a different get function, - * but it makes it harder to understand. + * Impl note: could have been a subclass of FixedSizeDeserializer with a different get + * function, but it makes it harder to understand. 
*/ class BooleanDeserializer : public Deserializer { public: @@ -175,9 +209,10 @@ class BooleanDeserializer : public Deserializer { * https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields#KIP-482:TheKafkaProtocolshouldSupportOptionalTaggedFields-UnsignedVarints * * Impl note: - * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 5-byte + * This implementation is equivalent to the one present in Kafka, what means that for 5-byte * inputs, the data at bits 5-7 in 5th byte are *ignored* (as long as 8th bit is unset). - * Reference: org.apache.kafka.common.utils.ByteUtils.readUnsignedVarint + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L142 */ class VarUInt32Deserializer : public Deserializer { public: @@ -227,12 +262,13 @@ class VarUInt32Deserializer : public Deserializer { /** * Deserializer for Kafka 'varint' type. - * Encoding documentation: https://kafka.apache.org/24/protocol.html#protocol_types + * Encoding documentation: https://kafka.apache.org/28/protocol.html#protocol_types * * Impl note: - * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 5-byte + * This implementation is equivalent to the one present in Kafka, what means that for 5-byte * inputs, the data at bits 5-7 in 5th byte are *ignored* (as long as 8th bit is unset). - * Reference: org.apache.kafka.common.utils.ByteUtils.readVarint + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L189 */ class VarInt32Deserializer : public Deserializer { public: @@ -253,12 +289,13 @@ class VarInt32Deserializer : public Deserializer { /** * Deserializer for Kafka 'varlong' type. 
- * Encoding documentation: https://kafka.apache.org/24/protocol.html#protocol_types + * Encoding documentation: https://kafka.apache.org/28/protocol.html#protocol_types * * Impl note: - * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 10-byte + * This implementation is equivalent to the one present in Kafka, what means that for 10-byte * inputs, the data at bits 3-7 in 10th byte are *ignored* (as long as 8th bit is unset). - * Reference: org.apache.kafka.common.utils.ByteUtils.readVarlong + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L242 */ class VarInt64Deserializer : public Deserializer { public: @@ -515,6 +552,36 @@ class NullableBytesDeserializer : public Deserializer { bool ready_{false}; }; +/** + * Deserializer of nullable compact bytes value. + * First reads length (UNSIGNED_VARINT32) and then allocates the buffer of given length. + * If length was 0, buffer allocation is omitted and deserializer is immediately ready (returning + * null value). + * + * From Kafka documentation: + * First the length N+1 is given as an UNSIGNED_VARINT. Then N bytes follow. + * A null object is represented with a length of 0. + */ +class NullableCompactBytesDeserializer : public Deserializer { +public: + /** + * Can throw EnvoyException if given bytes length is not valid. + */ + uint32_t feed(absl::string_view& data) override; + + bool ready() const override { return ready_; } + + NullableBytes get() const override; + +private: + VarUInt32Deserializer length_buf_; + bool length_consumed_{false}; + uint32_t required_; + + std::vector data_buf_; + bool ready_{false}; +}; + /** * Deserializer for array of objects of the same type. * @@ -851,6 +918,31 @@ class NullableCompactArrayDeserializer bool ready_{false}; }; +/** + * Kafka UUID is basically two longs, so we are going to keep model them the same way. 
+ * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/Uuid.java#L38 + */ +class UuidDeserializer : public Deserializer { +public: + uint32_t feed(absl::string_view& data) override { + uint32_t consumed = 0; + consumed += high_bytes_deserializer_.feed(data); + consumed += low_bytes_deserializer_.feed(data); + return consumed; + } + + bool ready() const override { return low_bytes_deserializer_.ready(); } + + Uuid get() const override { + return {high_bytes_deserializer_.get(), low_bytes_deserializer_.get()}; + } + +private: + Int64Deserializer high_bytes_deserializer_; + Int64Deserializer low_bytes_deserializer_; +}; + /** * Encodes provided argument in Kafka format. * In case of primitive types, this is done explicitly as per specification. @@ -961,9 +1053,11 @@ template inline uint32_t EncodingContext::computeSize(const T& arg) COMPUTE_SIZE_OF_NUMERIC_TYPE(bool) COMPUTE_SIZE_OF_NUMERIC_TYPE(int8_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(int16_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(uint16_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(int32_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(uint32_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(int64_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(double) /** * Template overload for string. @@ -1019,6 +1113,13 @@ inline uint32_t EncodingContext::computeSize(const NullableArray& arg) const return arg ? computeSize(*arg) : sizeof(int32_t); } +/** + * Template overload for Uuid. + */ +template <> inline uint32_t EncodingContext::computeSize(const Uuid&) const { + return 2 * sizeof(uint64_t); +} + /** * For non-primitive types, call `computeCompactSize` on them, to delegate the work to the entity * itself. The entity may use the information in context to decide which fields are included etc. @@ -1079,6 +1180,14 @@ template <> inline uint32_t EncodingContext::computeCompactSize(const Bytes& arg return computeCompactSize(static_cast(arg.size()) + 1) + arg.size(); } +/** + * Template overload for nullable compact byte array. 
+ * Kafka NullableCompactBytes' size is var-len encoding of N+1 + N bytes. + */ +template <> inline uint32_t EncodingContext::computeCompactSize(const NullableBytes& arg) const { + return arg ? computeCompactSize(*arg) : 1; +} + /** * Template overload for CompactArray of T. * The size of array is compact size of header and all of its elements. @@ -1131,10 +1240,24 @@ template <> inline uint32_t EncodingContext::encode(const int8_t& arg, Buffer::I } ENCODE_NUMERIC_TYPE(int16_t, htobe16); +ENCODE_NUMERIC_TYPE(uint16_t, htobe16); ENCODE_NUMERIC_TYPE(int32_t, htobe32); ENCODE_NUMERIC_TYPE(uint32_t, htobe32); ENCODE_NUMERIC_TYPE(int64_t, htobe64); +/** + * Template overload for double. + * Encodes 8 bytes. + */ +template <> inline uint32_t EncodingContext::encode(const double& arg, Buffer::Instance& dst) { + double tmp = arg; + uint64_t in_host_order; + safeMemcpy(&in_host_order, &tmp); + const uint64_t in_network_order = htobe64(in_host_order); + dst.add(&in_network_order, sizeof(uint64_t)); + return sizeof(uint64_t); +} + /** * Template overload for bool. * Encode boolean as a single byte. @@ -1227,6 +1350,16 @@ uint32_t EncodingContext::encode(const NullableArray& arg, Buffer::Instance& } } +/** + * Template overload for Uuid. + */ +template <> inline uint32_t EncodingContext::encode(const Uuid& arg, Buffer::Instance& dst) { + uint32_t result = 0; + result += encode(arg.msb_, dst); + result += encode(arg.lsb_, dst); + return result; +} + /** * For non-primitive types, call `encodeCompact` on them, to delegate the serialization to the * entity itself. @@ -1309,6 +1442,20 @@ inline uint32_t EncodingContext::encodeCompact(const Bytes& arg, Buffer::Instanc return header_length + data_length; } +/** + * Template overload for NullableBytes. + * Encode byte array as VAR_UINT + N bytes. 
+ */ +template <> +inline uint32_t EncodingContext::encodeCompact(const NullableBytes& arg, Buffer::Instance& dst) { + if (arg.has_value()) { + return encodeCompact(*arg, dst); + } else { + const uint32_t len = 0; + return encodeCompact(len, dst); + } +} + /** * Encode object array of T as VAR_UINT + N elements. * Each element of type T then serializes itself on its own. diff --git a/contrib/kafka/filters/network/source/tagged_fields.h b/contrib/kafka/filters/network/source/tagged_fields.h index f9aebaf7472bf..96e3c72e50bba 100644 --- a/contrib/kafka/filters/network/source/tagged_fields.h +++ b/contrib/kafka/filters/network/source/tagged_fields.h @@ -6,7 +6,7 @@ /** * This header file provides serialization support for tagged fields structure added in 2.4. - * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java * * Impl note: contrary to other compact data structures, data in tagged field does not have +1 in * data length. diff --git a/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc index 8d790b14806eb..3f30be33c7e47 100644 --- a/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc +++ b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc @@ -143,7 +143,7 @@ TEST_F(KafkaBrokerFilterProtocolTest, ShouldProcessMessages) { ASSERT_EQ(result2, Network::FilterStatus::Continue); // Also, assert that every message type has been processed properly. - for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) { + for (const int16_t i : MessageUtilities::apiKeys()) { // We should have received one request per api version. 
const Stats::Counter& request_counter = scope_.counter(MessageUtilities::requestMetric(i)); ASSERT_EQ(request_counter.value(), MessageUtilities::requestApiVersions(i)); diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc index d9ffda89635c8..ba144e3ce74c2 100644 --- a/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc @@ -39,8 +39,13 @@ TEST(MetadataTest, shouldBeAlwaysReadyForAnswer) { // Second topic is not going to have configuration present. EXPECT_CALL(configuration, computeClusterConfigForTopic("topic2")) .WillOnce(Return(absl::nullopt)); - const RequestHeader header = {0, 0, 0, absl::nullopt}; - const MetadataRequest data = {{MetadataRequestTopic{"topic1"}, MetadataRequestTopic{"topic2"}}}; + const RequestHeader header = {METADATA_REQUEST_API_KEY, METADATA_REQUEST_MAX_VERSION, 0, + absl::nullopt}; + const MetadataRequestTopic t1 = MetadataRequestTopic{"topic1"}; + const MetadataRequestTopic t2 = MetadataRequestTopic{"topic2"}; + // Third topic is not going to have an explicit name. 
+ const MetadataRequestTopic t3 = MetadataRequestTopic{Uuid{13, 42}, absl::nullopt, TaggedFields{}}; + const MetadataRequest data = {{t1, t2, t3}}; const auto message = std::make_shared>(header, data); MetadataRequestHolder testee = {filter, configuration, message}; @@ -61,6 +66,7 @@ TEST(MetadataTest, shouldBeAlwaysReadyForAnswer) { ASSERT_TRUE(response); const auto topics = response->data_.topics_; EXPECT_EQ(topics.size(), 1); + EXPECT_EQ(topics[0].name_, *(t1.name_)); EXPECT_EQ(topics[0].partitions_.size(), 42); } diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc index efa05e82e1f88..f2e1abf4afa1b 100644 --- a/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc @@ -38,9 +38,7 @@ class MockUpstreamKafkaFacade : public UpstreamKafkaFacade { class MockKafkaProducer : public KafkaProducer { public: - MOCK_METHOD(void, send, - (const ProduceFinishCbSharedPtr, const std::string&, const int32_t, - const absl::string_view, const absl::string_view)); + MOCK_METHOD(void, send, (const ProduceFinishCbSharedPtr, const OutboundRecord&), ()); MOCK_METHOD(void, markFinished, (), ()); }; @@ -83,8 +81,8 @@ TEST_F(ProduceUnitTest, ShouldHandleProduceRequestWithNoRecords) { // The response should contain the values returned by Kafka broker. 
TEST_F(ProduceUnitTest, ShouldSendRecordsInNormalFlow) { // given - const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; - const OutboundRecord r2 = {"t2", 42, "ccc", "ddd"}; + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb", {}}; + const OutboundRecord r2 = {"t2", 42, "ccc", "ddd", {}}; const std::vector records = {r1, r2}; EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); @@ -96,9 +94,9 @@ TEST_F(ProduceUnitTest, ShouldSendRecordsInNormalFlow) { // when, then - invoking should use producers to send records. MockKafkaProducer producer1; - EXPECT_CALL(producer1, send(_, r1.topic_, r1.partition_, _, _)); + EXPECT_CALL(producer1, send(_, _)); MockKafkaProducer producer2; - EXPECT_CALL(producer2, send(_, r2.topic_, r2.partition_, _, _)); + EXPECT_CALL(producer2, send(_, _)); EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r1.topic_)) .WillOnce(ReturnRef(producer1)); EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r2.topic_)) @@ -128,10 +126,10 @@ TEST_F(ProduceUnitTest, ShouldSendRecordsInNormalFlow) { ASSERT_TRUE(response); const std::vector responses = response->data_.responses_; EXPECT_EQ(responses.size(), 2); - EXPECT_EQ(responses[0].partitions_[0].error_code_, dm1.error_code_); - EXPECT_EQ(responses[0].partitions_[0].base_offset_, dm1.offset_); - EXPECT_EQ(responses[1].partitions_[0].error_code_, dm2.error_code_); - EXPECT_EQ(responses[1].partitions_[0].base_offset_, dm2.offset_); + EXPECT_EQ(responses[0].partition_responses_[0].error_code_, dm1.error_code_); + EXPECT_EQ(responses[0].partition_responses_[0].base_offset_, dm1.offset_); + EXPECT_EQ(responses[1].partition_responses_[0].error_code_, dm2.error_code_); + EXPECT_EQ(responses[1].partition_responses_[0].base_offset_, dm2.offset_); } // Typical flow without errors. @@ -141,8 +139,8 @@ TEST_F(ProduceUnitTest, ShouldSendRecordsInNormalFlow) { // is going to be saved on a bigger offset. 
TEST_F(ProduceUnitTest, ShouldMergeOutboundRecordResponses) { // given - const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; - const OutboundRecord r2 = {r1.topic_, r1.partition_, "ccc", "ddd"}; + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb", {}}; + const OutboundRecord r2 = {r1.topic_, r1.partition_, "ccc", "ddd", {}}; const std::vector records = {r1, r2}; EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); @@ -154,7 +152,7 @@ TEST_F(ProduceUnitTest, ShouldMergeOutboundRecordResponses) { // when, then - invoking should use producers to send records. MockKafkaProducer producer; - EXPECT_CALL(producer, send(_, r1.topic_, r1.partition_, _, _)).Times(2); + EXPECT_CALL(producer, send(_, _)).Times(2); EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r1.topic_)) .WillRepeatedly(ReturnRef(producer)); testee->startProcessing(); @@ -182,9 +180,9 @@ TEST_F(ProduceUnitTest, ShouldMergeOutboundRecordResponses) { ASSERT_TRUE(response); const std::vector responses = response->data_.responses_; EXPECT_EQ(responses.size(), 1); - EXPECT_EQ(responses[0].partitions_.size(), 1); - EXPECT_EQ(responses[0].partitions_[0].error_code_, 0); - EXPECT_EQ(responses[0].partitions_[0].base_offset_, 1313); + EXPECT_EQ(responses[0].partition_responses_.size(), 1); + EXPECT_EQ(responses[0].partition_responses_[0].error_code_, 0); + EXPECT_EQ(responses[0].partition_responses_[0].base_offset_, 1313); } // Flow with errors. @@ -195,8 +193,8 @@ TEST_F(ProduceUnitTest, ShouldMergeOutboundRecordResponses) { // proxy (this is going to be amended when we manage to send whole record batch). 
TEST_F(ProduceUnitTest, ShouldHandleDeliveryErrors) { // given - const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; - const OutboundRecord r2 = {r1.topic_, r1.partition_, "ccc", "ddd"}; + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb", {}}; + const OutboundRecord r2 = {r1.topic_, r1.partition_, "ccc", "ddd", {}}; const std::vector records = {r1, r2}; EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); @@ -208,7 +206,7 @@ TEST_F(ProduceUnitTest, ShouldHandleDeliveryErrors) { // when, then - invoking should use producers to send records. MockKafkaProducer producer; - EXPECT_CALL(producer, send(_, r1.topic_, r1.partition_, _, _)).Times(2); + EXPECT_CALL(producer, send(_, _)).Times(2); EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r1.topic_)) .WillRepeatedly(ReturnRef(producer)); testee->startProcessing(); @@ -237,7 +235,7 @@ TEST_F(ProduceUnitTest, ShouldHandleDeliveryErrors) { ASSERT_TRUE(response); const std::vector responses = response->data_.responses_; EXPECT_EQ(responses.size(), 1); - EXPECT_EQ(responses[0].partitions_[0].error_code_, dm1.error_code_); + EXPECT_EQ(responses[0].partition_responses_[0].error_code_, dm1.error_code_); } // As with current version of Kafka library we have no capability of linking producer's notification @@ -246,7 +244,7 @@ TEST_F(ProduceUnitTest, ShouldHandleDeliveryErrors) { // did not originate in this request, so it should be ignored. 
TEST_F(ProduceUnitTest, ShouldIgnoreMementoFromAnotherRequest) { // given - const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb", {}}; const std::vector records = {r1}; EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py b/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py index f21145cda99a4..53623b655f868 100644 --- a/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py +++ b/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py @@ -15,6 +15,17 @@ import urllib.request +class Message: + """ + Stores data sent to Envoy / Kafka. + """ + + def __init__(self): + self.key = os.urandom(256) + self.value = os.urandom(2048) + self.headers = [('header_' + str(h), os.urandom(128)) for h in range(3)] + + class IntegrationTest(unittest.TestCase): """ All tests in this class depend on Envoy/Zookeeper/Kafka running. 
@@ -81,28 +92,36 @@ def test_producing(self): producer = KafkaProducer( bootstrap_servers=IntegrationTest.kafka_envoy_address(), api_version=(1, 0, 0)) - offset_to_payload1 = {} - offset_to_payload2 = {} + offset_to_message1 = {} + offset_to_message2 = {} for _ in range(messages_to_send): - payload = bytearray(random.getrandbits(8) for _ in range(5)) + message = Message() future1 = producer.send( - value=payload, topic=partition1.topic, partition=partition1.partition) + key=message.key, + value=message.value, + headers=message.headers, + topic=partition1.topic, + partition=partition1.partition) self.assertTrue(future1.get().offset >= 0) - offset_to_payload1[future1.get().offset] = payload + offset_to_message1[future1.get().offset] = message future2 = producer.send( - value=payload, topic=partition2.topic, partition=partition2.partition) + key=message.key, + value=message.value, + headers=message.headers, + topic=partition2.topic, + partition=partition2.partition) self.assertTrue(future2.get().offset >= 0) - offset_to_payload2[future2.get().offset] = payload - self.assertTrue(len(offset_to_payload1) == messages_to_send) - self.assertTrue(len(offset_to_payload2) == messages_to_send) + offset_to_message2[future2.get().offset] = message + self.assertTrue(len(offset_to_message1) == messages_to_send) + self.assertTrue(len(offset_to_message2) == messages_to_send) producer.close() # Check the target clusters. self.__verify_target_kafka_cluster( - IntegrationTest.kafka_cluster1_address(), partition1, offset_to_payload1, partition2) + IntegrationTest.kafka_cluster1_address(), partition1, offset_to_message1, partition2) self.__verify_target_kafka_cluster( - IntegrationTest.kafka_cluster2_address(), partition2, offset_to_payload2, partition1) + IntegrationTest.kafka_cluster2_address(), partition2, offset_to_message2, partition1) # Check if requests have been received. 
self.metrics.collect_final_metrics() @@ -123,53 +142,64 @@ def test_producing_with_batched_records(self): api_version=(1, 0, 0), linger_ms=1000, batch_size=100) - future_to_payload1 = {} - future_to_payload2 = {} + future_to_message1 = {} + future_to_message2 = {} for _ in range(messages_to_send): - payload = bytearray(random.getrandbits(8) for _ in range(5)) + message = Message() future1 = producer.send( - value=payload, topic=partition1.topic, partition=partition1.partition) - future_to_payload1[future1] = payload - - payload = bytearray(random.getrandbits(8) for _ in range(5)) + key=message.key, + value=message.value, + headers=message.headers, + topic=partition1.topic, + partition=partition1.partition) + future_to_message1[future1] = message + + message = Message() future2 = producer.send( - value=payload, topic=partition2.topic, partition=partition2.partition) - future_to_payload2[future2] = payload - - offset_to_payload1 = {} - offset_to_payload2 = {} - for future in future_to_payload1.keys(): - offset_to_payload1[future.get().offset] = future_to_payload1[future] + key=message.key, + value=message.value, + headers=message.headers, + topic=partition2.topic, + partition=partition2.partition) + future_to_message2[future2] = message + + offset_to_message1 = {} + offset_to_message2 = {} + for future in future_to_message1.keys(): + offset_to_message1[future.get().offset] = future_to_message1[future] self.assertTrue(future.get().offset >= 0) - for future in future_to_payload2.keys(): - offset_to_payload2[future.get().offset] = future_to_payload2[future] + for future in future_to_message2.keys(): + offset_to_message2[future.get().offset] = future_to_message2[future] self.assertTrue(future.get().offset >= 0) - self.assertTrue(len(offset_to_payload1) == messages_to_send) - self.assertTrue(len(offset_to_payload2) == messages_to_send) + self.assertTrue(len(offset_to_message1) == messages_to_send) + self.assertTrue(len(offset_to_message2) == messages_to_send) 
producer.close() # Check the target clusters. self.__verify_target_kafka_cluster( - IntegrationTest.kafka_cluster1_address(), partition1, offset_to_payload1, partition2) + IntegrationTest.kafka_cluster1_address(), partition1, offset_to_message1, partition2) self.__verify_target_kafka_cluster( - IntegrationTest.kafka_cluster2_address(), partition2, offset_to_payload2, partition1) + IntegrationTest.kafka_cluster2_address(), partition2, offset_to_message2, partition1) # Check if requests have been received. self.metrics.collect_final_metrics() self.metrics.assert_metric_increase('produce', 1) def __verify_target_kafka_cluster( - self, bootstrap_servers, partition, offset_to_payload_map, other_partition): + self, bootstrap_servers, partition, offset_to_message_map, other_partition): # Check if records were properly forwarded to the cluster. consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers, auto_offset_reset='earliest') consumer.assign([partition]) received_messages = [] - while (len(received_messages) < len(offset_to_payload_map)): + while (len(received_messages) < len(offset_to_message_map)): poll_result = consumer.poll(timeout_ms=1000) received_messages += poll_result[partition] - self.assertTrue(len(received_messages) == len(offset_to_payload_map)) + self.assertTrue(len(received_messages) == len(offset_to_message_map)) for record in received_messages: - self.assertTrue(record.value == offset_to_payload_map[record.offset]) + sent_message = offset_to_message_map[record.offset] + self.assertTrue(record.key == sent_message.key) + self.assertTrue(record.value == sent_message.value) + self.assertTrue(record.headers == sent_message.headers) # Check that no records were incorrectly routed from the "other" partition (they would have created the topics). 
self.assertTrue(other_partition.topic not in consumer.topics()) diff --git a/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc b/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc index 605019141e707..c49b449138c50 100644 --- a/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc @@ -94,9 +94,9 @@ TEST_F(RequestProcessorTest, ShouldProcessApiVersionsRequest) { TEST_F(RequestProcessorTest, ShouldHandleUnsupportedRequest) { // given - const RequestHeader header = {LIST_OFFSET_REQUEST_API_KEY, 0, 0, absl::nullopt}; - const ListOffsetRequest data = {0, {}}; - const auto message = std::make_shared>(header, data); + const RequestHeader header = {LIST_OFFSETS_REQUEST_API_KEY, 0, 0, absl::nullopt}; + const ListOffsetsRequest data = {0, {}}; + const auto message = std::make_shared>(header, data); // when, then - exception gets thrown. EXPECT_THROW_WITH_REGEX(testee_.onMessage(message), EnvoyException, "unsupported"); diff --git a/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc b/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc index c40ebd8589bc8..d076ce0f841b3 100644 --- a/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc @@ -10,6 +10,7 @@ using testing::_; using testing::AnyNumber; using testing::AtLeast; +using testing::NiceMock; using testing::Return; using testing::ReturnNull; @@ -27,6 +28,16 @@ class MockLibRdKafkaUtils : public LibRdKafkaUtils { (RdKafka::Conf&, RdKafka::DeliveryReportCb*, std::string&), (const)); MOCK_METHOD((std::unique_ptr), createProducer, (RdKafka::Conf*, std::string& errstr), (const)); + MOCK_METHOD(RdKafka::Headers*, convertHeaders, + ((const std::vector>&)), (const)); + MOCK_METHOD(void, deleteHeaders, (RdKafka::Headers * 
librdkafka_headers), (const)); + + MockLibRdKafkaUtils() { + ON_CALL(*this, convertHeaders(_)).WillByDefault(Return(headers_holder_.get())); + } + +private: + std::unique_ptr headers_holder_{RdKafka::Headers::create()}; }; class MockProduceFinishCb : public ProduceFinishCb { @@ -38,7 +49,7 @@ class UpstreamKafkaClientTest : public testing::Test { protected: Event::MockDispatcher dispatcher_; Thread::ThreadFactory& thread_factory_ = Thread::threadFactoryForTest(); - MockLibRdKafkaUtils kafka_utils_; + NiceMock kafka_utils_{}; RawKafkaProducerConfig config_ = {{"key1", "value1"}, {"key2", "value2"}}; std::unique_ptr producer_ptr = std::make_unique(); @@ -58,9 +69,13 @@ class UpstreamKafkaClientTest : public testing::Test { EXPECT_CALL(producer, poll(_)).Times(AnyNumber()); EXPECT_CALL(kafka_utils_, createProducer(_, _)) .WillOnce(Return(testing::ByMove(std::move(producer_ptr)))); + + EXPECT_CALL(kafka_utils_, deleteHeaders(_)).Times(0); } }; +OutboundRecord makeRecord(const std::string& payload) { return {"topic", 13, payload, "key", {}}; } + TEST_F(UpstreamKafkaClientTest, ShouldConstructWithoutProblems) { // given setupConstructorExpectations(); @@ -75,12 +90,12 @@ TEST_F(UpstreamKafkaClientTest, ShouldSendRecordsAndReceiveConfirmations) { RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; // when, then - should send request without problems. 
- EXPECT_CALL(producer, produce("t1", 13, 0, _, _, _, _, _, _)) + EXPECT_CALL(producer, produce("topic", 13, _, _, _, _, _, _, _, _)) .Times(3) .WillRepeatedly(Return(RdKafka::ERR_NO_ERROR)); const std::vector payloads = {"value1", "value2", "value3"}; for (const auto& arg : payloads) { - testee.send(origin_, "t1", 13, "KEY", arg); + testee.send(origin_, makeRecord(arg)); } EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), payloads.size()); @@ -99,14 +114,14 @@ TEST_F(UpstreamKafkaClientTest, ShouldCheckCallbacksForDeliveries) { RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; // when, then - should send request without problems. - EXPECT_CALL(producer, produce("t1", 13, 0, _, _, _, _, _, _)) + EXPECT_CALL(producer, produce("topic", 13, _, _, _, _, _, _, _, _)) .Times(2) .WillRepeatedly(Return(RdKafka::ERR_NO_ERROR)); const std::vector payloads = {"value1", "value2"}; auto origin1 = std::make_shared(); auto origin2 = std::make_shared(); - testee.send(origin1, "t1", 13, "KEY", payloads[0]); - testee.send(origin2, "t1", 13, "KEY", payloads[1]); + testee.send(origin1, makeRecord(payloads[0])); + testee.send(origin2, makeRecord(payloads[1])); EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), payloads.size()); // when, then - should process confirmations (notice we pass second memento first). @@ -126,10 +141,11 @@ TEST_F(UpstreamKafkaClientTest, ShouldHandleProduceFailures) { RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; // when, then - if there are problems while sending, notify the source immediately. 
- EXPECT_CALL(producer, produce("t1", 42, 0, _, _, _, _, _, _)) + EXPECT_CALL(producer, produce("topic", 13, _, _, _, _, _, _, _, _)) .WillOnce(Return(RdKafka::ERR_LEADER_NOT_AVAILABLE)); + EXPECT_CALL(kafka_utils_, deleteHeaders(_)); EXPECT_CALL(*origin_, accept(_)).WillOnce(Return(true)); - testee.send(origin_, "t1", 42, "KEY", "VALUE"); + testee.send(origin_, makeRecord("value")); EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), 0); } @@ -137,13 +153,27 @@ TEST_F(UpstreamKafkaClientTest, ShouldHandleKafkaCallback) { // given setupConstructorExpectations(); RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; - testing::NiceMock message; + NiceMock message; // when, then - notification is passed to dispatcher. EXPECT_CALL(dispatcher_, post(_)); testee.dr_cb(message); } +TEST_F(UpstreamKafkaClientTest, ShouldHandleHeaderConversionFailures) { + // given + setupConstructorExpectations(); + EXPECT_CALL(kafka_utils_, convertHeaders(_)).WillOnce(Return(nullptr)); + + RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; + + // when, then - producer was not interacted with, response was sent immediately. + EXPECT_CALL(producer, produce(_, _, _, _, _, _, _, _, _, _)).Times(0); + EXPECT_CALL(*origin_, accept(_)).WillOnce(Return(true)); + testee.send(origin_, makeRecord("value")); + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), 0); +} + // This handles situations when users pass bad config to raw producer. TEST_F(UpstreamKafkaClientTest, ShouldThrowIfSettingPropertiesFails) { // given diff --git a/contrib/kafka/filters/network/test/message_utilities.h b/contrib/kafka/filters/network/test/message_utilities.h index 00278094e2cc1..1e8b5bac8bb44 100644 --- a/contrib/kafka/filters/network/test/message_utilities.h +++ b/contrib/kafka/filters/network/test/message_utilities.h @@ -25,10 +25,9 @@ class MessageUtilities { public: /** - * How many request/response types are supported. 
- * Proper values are 0..apiKeys() - 1. + * What are the supported request / response types. */ - static int16_t apiKeys(); + static std::vector apiKeys(); /** * How many request types are supported for given api key. diff --git a/contrib/kafka/filters/network/test/metrics_integration_test.cc b/contrib/kafka/filters/network/test/metrics_integration_test.cc index 47873fbfaeef6..488befd0c28eb 100644 --- a/contrib/kafka/filters/network/test/metrics_integration_test.cc +++ b/contrib/kafka/filters/network/test/metrics_integration_test.cc @@ -21,7 +21,7 @@ class MetricsIntegrationTest : public testing::Test { constexpr static int32_t UPDATE_COUNT = 42; TEST_F(MetricsIntegrationTest, ShouldUpdateRequestMetrics) { - for (int16_t api_key = 0; api_key < MessageUtilities::apiKeys(); ++api_key) { + for (const int16_t api_key : MessageUtilities::apiKeys()) { // given // when for (int i = 0; i < UPDATE_COUNT; ++i) { @@ -46,7 +46,7 @@ TEST_F(MetricsIntegrationTest, ShouldHandleUnparseableRequest) { } TEST_F(MetricsIntegrationTest, ShouldUpdateResponseMetrics) { - for (int16_t api_key = 0; api_key < MessageUtilities::apiKeys(); ++api_key) { + for (const int16_t api_key : MessageUtilities::apiKeys()) { // given // when for (int i = 0; i < UPDATE_COUNT; ++i) { diff --git a/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 b/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 index a90796c0acc11..3ec7d9f5535e9 100644 --- a/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 @@ -3,6 +3,8 @@ This file contains implementation of request-related methods contained in 'message_utilities.h'. 
#} +#include + #include "contrib/kafka/filters/network/test/message_utilities.h" #include "contrib/kafka/filters/network/source/external/requests.h" @@ -12,8 +14,12 @@ namespace Extensions { namespace NetworkFilters { namespace Kafka { -int16_t MessageUtilities::apiKeys() { - return {{ message_types | length }}; +std::vector MessageUtilities::apiKeys() { + std::vector result; + {% for message_type in message_types %} + result.push_back({{ message_type.get_extra('api_key') }}); + {% endfor %} + return result; } int16_t MessageUtilities::requestApiVersions(const int16_t api_key) { @@ -30,7 +36,8 @@ int16_t MessageUtilities::requestApiVersions(const int16_t api_key) { std::vector MessageUtilities::makeRequests( const int16_t api_key, int32_t& correlation_id) { - if ((api_key < 0) || (api_key >= {{ message_types | length }})) { + const std::vector api_keys = apiKeys(); + if (std::find(api_keys.begin(), api_keys.end(), api_key) == api_keys.end()) { throw EnvoyException("unsupported api key used in test code"); } @@ -56,7 +63,7 @@ std::vector MessageUtilities::makeRequests( std::vector MessageUtilities::makeAllRequests() { std::vector result; int32_t correlation_id = 0; - for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) { + for (const int16_t i : MessageUtilities::apiKeys()) { const std::vector tmp = MessageUtilities::makeRequests(i, correlation_id); result.insert(result.end(), tmp.begin(), tmp.end()); diff --git a/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 b/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 index cf41d02e3ca78..c57d386a5b565 100644 --- a/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 @@ -3,6 +3,8 @@ This file contains implementation of response-related methods contained in 'message_utilities.h'. 
#} +#include + #include "contrib/kafka/filters/network/test/message_utilities.h" #include "contrib/kafka/filters/network/source/external/responses.h" @@ -26,7 +28,8 @@ int16_t MessageUtilities::responseApiVersions(const int16_t api_key) { std::vector MessageUtilities::makeResponses( const int16_t api_key, int32_t& correlation_id) { - if ((api_key < 0) || (api_key >= {{ message_types | length }})) { + const std::vector api_keys = apiKeys(); + if (std::find(api_keys.begin(), api_keys.end(), api_key) == api_keys.end()) { throw EnvoyException("unsupported api key used in test code"); } @@ -51,7 +54,7 @@ std::vector MessageUtilities::makeResponses( std::vector MessageUtilities::makeAllResponses() { std::vector result; int32_t correlation_id = 0; - for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) { + for (const int16_t i : MessageUtilities::apiKeys()) { const std::vector tmp = MessageUtilities::makeResponses(i, correlation_id); result.insert(result.end(), tmp.begin(), tmp.end()); diff --git a/contrib/kafka/filters/network/test/serialization_test.cc b/contrib/kafka/filters/network/test/serialization_test.cc index c177e86364dd7..b9264cf237e47 100644 --- a/contrib/kafka/filters/network/test/serialization_test.cc +++ b/contrib/kafka/filters/network/test/serialization_test.cc @@ -23,9 +23,11 @@ namespace SerializationTest { TEST_EmptyDeserializerShouldNotBeReady(Int8Deserializer); TEST_EmptyDeserializerShouldNotBeReady(Int16Deserializer); +TEST_EmptyDeserializerShouldNotBeReady(UInt16Deserializer); TEST_EmptyDeserializerShouldNotBeReady(Int32Deserializer); TEST_EmptyDeserializerShouldNotBeReady(UInt32Deserializer); TEST_EmptyDeserializerShouldNotBeReady(Int64Deserializer); +TEST_EmptyDeserializerShouldNotBeReady(Float64Deserializer); TEST_EmptyDeserializerShouldNotBeReady(BooleanDeserializer); TEST_EmptyDeserializerShouldNotBeReady(VarUInt32Deserializer); TEST_EmptyDeserializerShouldNotBeReady(VarInt32Deserializer); @@ -38,6 +40,8 @@ 
TEST_EmptyDeserializerShouldNotBeReady(NullableCompactStringDeserializer); TEST_EmptyDeserializerShouldNotBeReady(BytesDeserializer); TEST_EmptyDeserializerShouldNotBeReady(CompactBytesDeserializer); TEST_EmptyDeserializerShouldNotBeReady(NullableBytesDeserializer); +TEST_EmptyDeserializerShouldNotBeReady(NullableCompactBytesDeserializer); +TEST_EmptyDeserializerShouldNotBeReady(UuidDeserializer); TEST(ArrayDeserializer, EmptyBufferShouldNotBeReady) { // given @@ -77,9 +81,11 @@ TEST(NullableCompactArrayDeserializer, EmptyBufferShouldNotBeReady) { TEST_DeserializerShouldDeserialize(Int8Deserializer, int8_t, 42); TEST_DeserializerShouldDeserialize(Int16Deserializer, int16_t, 42); +TEST_DeserializerShouldDeserialize(UInt16Deserializer, uint16_t, 42); TEST_DeserializerShouldDeserialize(Int32Deserializer, int32_t, 42); TEST_DeserializerShouldDeserialize(UInt32Deserializer, uint32_t, 42); TEST_DeserializerShouldDeserialize(Int64Deserializer, int64_t, 42); +TEST_DeserializerShouldDeserialize(Float64Deserializer, double, 13.25); TEST_DeserializerShouldDeserialize(BooleanDeserializer, bool, true); EncodingContext encoder{-1}; // Provided api_version does not matter for primitive types. @@ -424,7 +430,24 @@ TEST(NullableBytesDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Generic array tests. +// Nullable compact byte-array tests. + +TEST(NullableCompactBytesDeserializer, ShouldDeserialize) { + const NullableBytes value{{'a', 'b', 'c', 'd'}}; + serializeCompactThenDeserializeAndCheckEquality(value); +} + +TEST(NullableCompactBytesDeserializer, ShouldDeserializeEmptyBytes) { + const NullableBytes value = {{}}; + serializeCompactThenDeserializeAndCheckEquality(value); +} + +TEST(NullableCompactBytesDeserializer, ShouldDeserializeNullBytes) { + const NullableBytes value = absl::nullopt; + serializeCompactThenDeserializeAndCheckEquality(value); +} + +// Generic-array tests. 
TEST(ArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const std::vector value{{"aaa", "bbbbb", "cc", "d", "e", "ffffffff"}}; @@ -446,7 +469,7 @@ TEST(ArrayDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Compact generic array tests. +// Compact generic-array tests. TEST(CompactArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const std::vector value{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}; @@ -469,7 +492,7 @@ TEST(CompactArrayDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Generic nullable array tests. +// Nullable generic-array tests. TEST(NullableArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const NullableArray value{{"aaa", "bbbbb", "cc", "d", "e", "ffffffff"}}; @@ -496,7 +519,7 @@ TEST(NullableArrayDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Compact nullable generic array tests. +// Nullable compact generic-array tests. TEST(NullableCompactArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const NullableArray value{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}; @@ -521,6 +544,13 @@ TEST(NullableCompactArrayDeserializer, ShouldConsumeCorrectAmountOfDataForLargeI NullableCompactArrayDeserializer>(value); } +// UUID. + +TEST(UuidDeserializer, ShouldDeserialize) { + const Uuid value = {13, 42}; + serializeThenDeserializeAndCheckEquality(value); +} + // Tagged fields. 
TEST(TaggedFieldDeserializer, ShouldConsumeCorrectAmountOfData) { diff --git a/contrib/rocketmq_proxy/filters/network/source/BUILD b/contrib/rocketmq_proxy/filters/network/source/BUILD index b15d7db7e41ba..d274adf6c6d0e 100644 --- a/contrib/rocketmq_proxy/filters/network/source/BUILD +++ b/contrib/rocketmq_proxy/filters/network/source/BUILD @@ -10,8 +10,8 @@ licenses(["notice"]) # Apache 2 envoy_contrib_package() envoy_cc_library( - name = "well_known_names", - hdrs = ["well_known_names.h"], + name = "constant", + hdrs = ["constant.h"], deps = ["//source/common/singleton:const_singleton"], ) @@ -58,8 +58,8 @@ envoy_cc_library( name = "protocol_lib", srcs = ["protocol.cc"], deps = [ + ":constant", ":protocol_interface", - ":well_known_names", "//source/common/common:enum_to_int", ], ) @@ -91,10 +91,10 @@ envoy_cc_library( ], deps = [ ":codec_lib", + ":constant", ":protocol_lib", ":rocketmq_lib", ":stats_interface", - ":well_known_names", "//contrib/rocketmq_proxy/filters/network/source/router:router_interface", "//envoy/buffer:buffer_interface", "//envoy/event:dispatcher_interface", diff --git a/contrib/rocketmq_proxy/filters/network/source/active_message.cc b/contrib/rocketmq_proxy/filters/network/source/active_message.cc index 15e0f505e3f4e..f960a343d8105 100644 --- a/contrib/rocketmq_proxy/filters/network/source/active_message.cc +++ b/contrib/rocketmq_proxy/filters/network/source/active_message.cc @@ -9,8 +9,8 @@ #include "absl/strings/match.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/topic_route.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" using Envoy::Tcp::ConnectionPool::ConnectionDataPtr; diff --git a/contrib/rocketmq_proxy/filters/network/source/config.cc b/contrib/rocketmq_proxy/filters/network/source/config.cc index dbb63f91a292a..25d630d6d1239 100644 --- 
a/contrib/rocketmq_proxy/filters/network/source/config.cc +++ b/contrib/rocketmq_proxy/filters/network/source/config.cc @@ -23,8 +23,8 @@ Network::FilterFactoryCb RocketmqProxyFilterConfigFactory::createFilterFactoryFr Server::Configuration::FactoryContext& context) { std::shared_ptr filter_config = std::make_shared(proto_config, context); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addReadFilter( - std::make_shared(*filter_config, context.dispatcher().timeSource())); + filter_manager.addReadFilter(std::make_shared( + *filter_config, context.mainThreadDispatcher().timeSource())); }; } diff --git a/contrib/rocketmq_proxy/filters/network/source/well_known_names.h b/contrib/rocketmq_proxy/filters/network/source/constant.h similarity index 100% rename from contrib/rocketmq_proxy/filters/network/source/well_known_names.h rename to contrib/rocketmq_proxy/filters/network/source/constant.h diff --git a/contrib/rocketmq_proxy/filters/network/source/protocol.cc b/contrib/rocketmq_proxy/filters/network/source/protocol.cc index 7b9ff954798e8..cd0481710ba13 100644 --- a/contrib/rocketmq_proxy/filters/network/source/protocol.cc +++ b/contrib/rocketmq_proxy/filters/network/source/protocol.cc @@ -3,7 +3,7 @@ #include "source/common/common/assert.h" #include "source/common/common/enum_to_int.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" namespace Envoy { namespace Extensions { diff --git a/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc index 138ff56bd747b..b5b5a59d62d2c 100644 --- a/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc +++ b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc @@ -6,8 +6,8 @@ #include "contrib/rocketmq_proxy/filters/network/source/active_message.h" #include 
"contrib/rocketmq_proxy/filters/network/source/codec.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/protocol.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" namespace Envoy { namespace Extensions { diff --git a/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc index 8b98a7be887b6..a471349d05d02 100644 --- a/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc @@ -6,8 +6,8 @@ #include "contrib/rocketmq_proxy/filters/network/source/active_message.h" #include "contrib/rocketmq_proxy/filters/network/source/config.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/protocol.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" #include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -24,7 +24,7 @@ class ActiveMessageTest : public testing::Test { ActiveMessageTest() : stats_(RocketmqFilterStats::generateStats("test.", store_)), config_(rocketmq_proxy_config_, factory_context_), - connection_manager_(config_, factory_context_.dispatcher().timeSource()) { + connection_manager_(config_, factory_context_.mainThreadDispatcher().timeSource()) { connection_manager_.initializeReadFilterCallbacks(filter_callbacks_); } diff --git a/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc index 30b783c4a0d98..4c4a1d3020138 100644 --- a/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc +++ 
b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc @@ -9,7 +9,7 @@ #include "contrib/rocketmq_proxy/filters/network/source/config.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -54,11 +54,11 @@ class RocketmqConnectionManagerTest : public Event::TestUsingSimulatedTime, publ TestUtility::validate(proto_config_); } config_ = std::make_unique(proto_config_, factory_context_, stats_); - conn_manager_ = - std::make_unique(*config_, factory_context_.dispatcher().timeSource()); + conn_manager_ = std::make_unique( + *config_, factory_context_.mainThreadDispatcher().timeSource()); conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); conn_manager_->onNewConnection(); - current_ = factory_context_.dispatcher().timeSource().monotonicTime(); + current_ = factory_context_.mainThreadDispatcher().timeSource().monotonicTime(); } void initializeCluster() { diff --git a/contrib/rocketmq_proxy/filters/network/test/router_test.cc b/contrib/rocketmq_proxy/filters/network/test/router_test.cc index 3cee446599e4e..e4128ee0d36c6 100644 --- a/contrib/rocketmq_proxy/filters/network/test/router_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/router_test.cc @@ -2,8 +2,8 @@ #include "contrib/rocketmq_proxy/filters/network/source/config.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/router/router.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" #include "contrib/rocketmq_proxy/filters/network/test/mocks.h" #include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include 
"gtest/gtest.h" @@ -25,7 +25,7 @@ class RocketmqRouterTestBase { cluster_info_(std::make_shared()) { context_.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); conn_manager_ = - std::make_unique(config_, context_.dispatcher().timeSource()); + std::make_unique(config_, context_.mainThreadDispatcher().timeSource()); conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); } diff --git a/contrib/sip_proxy/filters/network/source/BUILD b/contrib/sip_proxy/filters/network/source/BUILD new file mode 100644 index 0000000000000..90e7739507d48 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/BUILD @@ -0,0 +1,164 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", + "envoy_cc_library", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_library( + name = "app_exception_lib", + srcs = ["app_exception_impl.cc"], + hdrs = ["app_exception_impl.h"], + deps = [ + ":protocol_interface", + ":sip_lib", + "//envoy/buffer:buffer_interface", + ], +) + +envoy_cc_contrib_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":app_exception_lib", + ":conn_manager_lib", + ":decoder_lib", + ":protocol_interface", + "//contrib/sip_proxy/filters/network/source/filters:filter_config_interface", + "//contrib/sip_proxy/filters/network/source/filters:well_known_names", + "//contrib/sip_proxy/filters/network/source/router:router_lib", + "//envoy/registry", + "//source/common/config:utility_lib", + "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "conn_manager_lib", + srcs = ["conn_manager.cc"], + hdrs = ["conn_manager.h"], + external_deps = ["abseil_any"], + deps = [ + ":app_exception_lib", + ":decoder_lib", + ":encoder_lib", + ":protocol_interface", + ":stats_lib", + 
"//contrib/sip_proxy/filters/network/source/router:router_interface", + "//envoy/event:deferred_deletable", + "//envoy/event:dispatcher_interface", + "//envoy/network:connection_interface", + "//envoy/network:filter_interface", + "//envoy/stats:stats_interface", + "//envoy/stats:timespan_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:linked_object", + "//source/common/common:logger_lib", + "//source/common/network:filter_lib", + "//source/common/stats:timespan_lib", + "//source/common/stream_info:stream_info_lib", + ], +) + +envoy_cc_library( + name = "decoder_events_lib", + hdrs = ["decoder_events.h"], + deps = [ + ":metadata_lib", + ":sip_lib", + ], +) + +envoy_cc_library( + name = "decoder_lib", + srcs = ["decoder.cc"], + hdrs = ["decoder.h"], + deps = [ + ":app_exception_lib", + ":protocol_interface", + ":stats_lib", + "//contrib/sip_proxy/filters/network/source/filters:filter_interface", + "//source/common/buffer:buffer_lib", + ], +) + +envoy_cc_library( + name = "encoder_lib", + srcs = ["encoder.cc"], + hdrs = ["encoder.h"], + deps = [ + ":app_exception_lib", + ":protocol_interface", + ":stats_lib", + "//contrib/sip_proxy/filters/network/source/filters:filter_interface", + "//source/common/buffer:buffer_lib", + ], +) + +envoy_cc_library( + name = "metadata_lib", + hdrs = [ + "metadata.h", + "operation.h", + ], + external_deps = ["abseil_optional"], + deps = [ + ":sip_lib", + "//envoy/buffer:buffer_interface", + "//source/common/common:macros", + "//source/common/http:header_map_lib", + ], +) + +envoy_cc_library( + name = "protocol_interface", + hdrs = [ + "protocol.h", + ], + external_deps = ["abseil_optional"], + deps = [ + ":conn_state_lib", + ":decoder_events_lib", + ":metadata_lib", + ":sip_lib", + "//envoy/buffer:buffer_interface", + "//envoy/config:typed_config_interface", + "//envoy/registry", + "//source/common/common:assert_lib", + "//source/common/config:utility_lib", + 
"//source/common/singleton:const_singleton", + ], +) + +envoy_cc_library( + name = "stats_lib", + hdrs = ["stats.h"], + deps = [ + "//envoy/stats:stats_interface", + "//envoy/stats:stats_macros", + ], +) + +envoy_cc_library( + name = "conn_state_lib", + hdrs = ["conn_state.h"], + deps = [ + "//envoy/tcp:conn_pool_interface", + ], +) + +envoy_cc_library( + name = "sip_lib", + hdrs = ["sip.h"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/singleton:const_singleton", + ], +) diff --git a/contrib/sip_proxy/filters/network/source/app_exception_impl.cc b/contrib/sip_proxy/filters/network/source/app_exception_impl.cc new file mode 100644 index 0000000000000..e2dcd4953434f --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/app_exception_impl.cc @@ -0,0 +1,21 @@ +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +DirectResponse::ResponseType AppException::encode(MessageMetadata& metadata, + Buffer::Instance& buffer) const { + (void)metadata; + (void)buffer; + + // TODO + + return DirectResponse::ResponseType::Exception; +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/app_exception_impl.h b/contrib/sip_proxy/filters/network/source/app_exception_impl.h new file mode 100644 index 0000000000000..9d2bc76aa6a46 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/app_exception_impl.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/common/exception.h" + +#include "contrib/sip_proxy/filters/network/source/metadata.h" +#include "contrib/sip_proxy/filters/network/source/protocol.h" +#include "contrib/sip_proxy/filters/network/source/sip.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +struct AppException : public EnvoyException, public DirectResponse { + 
AppException(AppExceptionType type, const std::string& what) + : EnvoyException(what), type_(type) {} + AppException(const AppException& ex) : EnvoyException(ex.what()), type_(ex.type_) {} + + ResponseType encode(MessageMetadata& metadata, Buffer::Instance& buffer) const override; + + const AppExceptionType type_; +}; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/config.cc b/contrib/sip_proxy/filters/network/source/config.cc new file mode 100644 index 0000000000000..f8247d8729db6 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/config.cc @@ -0,0 +1,135 @@ +#include "contrib/sip_proxy/filters/network/source/config.h" + +#include + +#include "envoy/network/connection.h" +#include "envoy/registry/registry.h" + +#include "source/common/config/utility.h" + +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.validate.h" +#include "contrib/sip_proxy/filters/network/source/decoder.h" +#include "contrib/sip_proxy/filters/network/source/filters/filter_config.h" +#include "contrib/sip_proxy/filters/network/source/filters/well_known_names.h" +#include "contrib/sip_proxy/filters/network/source/router/router_impl.h" +#include "contrib/sip_proxy/filters/network/source/stats.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +namespace { +inline void +addUniqueClusters(absl::flat_hash_set& clusters, + const envoy::extensions::filters::network::sip_proxy::v3alpha::Route& route) { + clusters.emplace(route.route().cluster()); +} +} // namespace + +ProtocolOptionsConfigImpl::ProtocolOptionsConfigImpl( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProtocolOptions& config) + : session_affinity_(config.session_affinity()), + registration_affinity_(config.registration_affinity()) 
{} + +bool ProtocolOptionsConfigImpl::sessionAffinity() const { return session_affinity_; } +bool ProtocolOptionsConfigImpl::registrationAffinity() const { return registration_affinity_; } + +Network::FilterFactoryCb SipProxyFilterConfigFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy& proto_config, + Server::Configuration::FactoryContext& context) { + std::shared_ptr filter_config(new ConfigImpl(proto_config, context)); + + absl::flat_hash_set unique_clusters; + for (auto& route : proto_config.route_config().routes()) { + addUniqueClusters(unique_clusters, route); + } + + /** + * ConnPool::InstanceImpl contains ThreadLocalObject ThreadLocalPool which only can be + * instantiated on main thread. so construct ConnPool::InstanceImpl here. + */ + auto transaction_infos = std::make_shared(); + for (auto& cluster : unique_clusters) { + Stats::ScopePtr stats_scope = + context.scope().createScope(fmt::format("cluster.{}.sip_cluster", cluster)); + auto transaction_info_ptr = std::make_shared( + cluster, context.threadLocal(), + static_cast( + PROTOBUF_GET_MS_OR_DEFAULT(proto_config.settings(), transaction_timeout, 32000)), + proto_config.settings().own_domain(), + proto_config.settings().domain_match_parameter_name()); + transaction_info_ptr->init(); + transaction_infos->emplace(cluster, transaction_info_ptr); + } + + return + [filter_config, &context, transaction_infos](Network::FilterManager& filter_manager) -> void { + filter_manager.addReadFilter(std::make_shared( + *filter_config, context.api().randomGenerator(), + context.mainThreadDispatcher().timeSource(), transaction_infos)); + }; +} + +/** + * Static registration for the sip filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(SipProxyFilterConfigFactory, + Server::Configuration::NamedNetworkFilterConfigFactory); + +ConfigImpl::ConfigImpl( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy& config, + Server::Configuration::FactoryContext& context) + : context_(context), stats_prefix_(fmt::format("sip.{}.", config.stat_prefix())), + stats_(SipFilterStats::generateStats(stats_prefix_, context_.scope())), + route_matcher_(new Router::RouteMatcher(config.route_config())), + settings_(std::make_shared( + static_cast( + PROTOBUF_GET_MS_OR_DEFAULT(config.settings(), transaction_timeout, 32000)), + config.settings().own_domain(), config.settings().domain_match_parameter_name())) { + + if (config.sip_filters().empty()) { + ENVOY_LOG(debug, "using default router filter"); + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipFilter router; + router.set_name(SipFilters::SipFilterNames::get().ROUTER); + processFilter(router); + } else { + for (const auto& filter : config.sip_filters()) { + processFilter(filter); + } + } +} + +void ConfigImpl::createFilterChain(SipFilters::FilterChainFactoryCallbacks& callbacks) { + for (const SipFilters::FilterFactoryCb& factory : filter_factories_) { + factory(callbacks); + } +} + +void ConfigImpl::processFilter( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipFilter& proto_config) { + const std::string& string_name = proto_config.name(); + + ENVOY_LOG(debug, " sip filter #{}", filter_factories_.size()); + ENVOY_LOG(debug, " name: {}", string_name); + ENVOY_LOG(debug, " config: {}", + MessageUtil::getJsonStringFromMessageOrError( + static_cast(proto_config.typed_config()))); + auto& factory = + Envoy::Config::Utility::getAndCheckFactory( + proto_config); + + ProtobufTypes::MessagePtr message = Envoy::Config::Utility::translateAnyToFactoryConfig( + proto_config.typed_config(), context_.messageValidationVisitor(), factory); + SipFilters::FilterFactoryCb callback = + 
factory.createFilterFactoryFromProto(*message, stats_prefix_, context_); + + filter_factories_.push_back(callback); +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/config.h b/contrib/sip_proxy/filters/network/source/config.h new file mode 100644 index 0000000000000..f741bf00bcb17 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/config.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include + +#include "source/extensions/filters/network/common/factory_base.h" + +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.validate.h" +#include "contrib/sip_proxy/filters/network/source/conn_manager.h" +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" +#include "contrib/sip_proxy/filters/network/source/filters/well_known_names.h" +#include "contrib/sip_proxy/filters/network/source/router/router_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +/** + * Provides Sip-specific cluster options. + */ +class ProtocolOptionsConfigImpl : public ProtocolOptionsConfig { +public: + ProtocolOptionsConfigImpl( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProtocolOptions& + proto_config); + + bool sessionAffinity() const override; + bool registrationAffinity() const override; + +private: + bool session_affinity_; + bool registration_affinity_; +}; + +/** + * Config registration for the sip proxy filter. @see NamedNetworkFilterConfigFactory. 
+ */ +class SipProxyFilterConfigFactory + : public Common::FactoryBase< + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy, + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProtocolOptions> { +public: + SipProxyFilterConfigFactory() : FactoryBase(SipProxy, true) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy& proto_config, + Server::Configuration::FactoryContext& context) override; + Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProtocolOptions& + proto_config, + Server::Configuration::ProtocolOptionsFactoryContext&) override { + return std::make_shared(proto_config); + } +}; + +class ConfigImpl : public Config, + public Router::Config, + public SipFilters::FilterChainFactory, + Logger::Loggable { +public: + ConfigImpl(const envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy& config, + Server::Configuration::FactoryContext& context); + + // SipFilters::FilterChainFactory + void createFilterChain(SipFilters::FilterChainFactoryCallbacks& callbacks) override; + + // Router::Config + Router::RouteConstSharedPtr route(MessageMetadata& metadata) const override { + return route_matcher_->route(metadata); + } + + // Config + SipFilterStats& stats() override { return stats_; } + SipFilters::FilterChainFactory& filterFactory() override { return *this; } + Router::Config& routerConfig() override { return *this; } + std::shared_ptr settings() override { return settings_; } + + // Settings +private: + void processFilter( + const envoy::extensions::filters::network::sip_proxy::v3alpha::SipFilter& proto_config); + + Server::Configuration::FactoryContext& context_; + const std::string stats_prefix_; + SipFilterStats stats_; + std::unique_ptr route_matcher_; + + std::list filter_factories_; + + std::shared_ptr settings_; +}; + +} // 
namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/conn_manager.cc b/contrib/sip_proxy/filters/network/source/conn_manager.cc new file mode 100644 index 0000000000000..4f28f3f4bb545 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/conn_manager.cc @@ -0,0 +1,338 @@ +#include "contrib/sip_proxy/filters/network/source/conn_manager.h" + +#include "envoy/common/exception.h" +#include "envoy/event/dispatcher.h" + +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "contrib/sip_proxy/filters/network/source/encoder.h" +#include "contrib/sip_proxy/filters/network/source/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +ConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator, + TimeSource& time_source, + std::shared_ptr transaction_infos) + : config_(config), stats_(config_.stats()), decoder_(std::make_unique(*this)), + random_generator_(random_generator), time_source_(time_source), + transaction_infos_(transaction_infos) {} + +ConnectionManager::~ConnectionManager() = default; + +Network::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end_stream) { + ENVOY_LOG(trace, "ConnectionManager received data {}\n{}\n", data.length(), data.toString()); + request_buffer_.move(data); + dispatch(); + + if (end_stream) { + ENVOY_CONN_LOG(info, "downstream half-closed", read_callbacks_->connection()); + + resetAllTrans(false); + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } + + return Network::FilterStatus::StopIteration; +} + +void ConnectionManager::dispatch() { decoder_->onData(request_buffer_); } + +void ConnectionManager::sendLocalReply(MessageMetadata& metadata, const DirectResponse& response, + bool end_stream) { + if (read_callbacks_->connection().state() == 
Network::Connection::State::Closed) { + return; + } + + Buffer::OwnedImpl buffer; + const DirectResponse::ResponseType result = response.encode(metadata, buffer); + + Buffer::OwnedImpl response_buffer; + + metadata.setEP(getLocalIp()); + std::shared_ptr encoder = std::make_shared(); + encoder->encode(std::make_shared(metadata), response_buffer); + + read_callbacks_->connection().write(response_buffer, end_stream); + if (end_stream) { + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } + + switch (result) { + case DirectResponse::ResponseType::SuccessReply: + stats_.response_success_.inc(); + break; + case DirectResponse::ResponseType::ErrorReply: + stats_.response_error_.inc(); + break; + case DirectResponse::ResponseType::Exception: + stats_.response_exception_.inc(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +void ConnectionManager::doDeferredTransDestroy(ConnectionManager::ActiveTrans& trans) { + read_callbacks_->connection().dispatcher().deferredDelete( + std::move(transactions_.at(trans.transactionId()))); + transactions_.erase(trans.transactionId()); +} + +void ConnectionManager::resetAllTrans(bool local_reset) { + ENVOY_LOG(info, "active_trans to be deleted {}", transactions_.size()); + for (auto it = transactions_.cbegin(); it != transactions_.cend();) { + if (local_reset) { + ENVOY_CONN_LOG(debug, "local close with active request", read_callbacks_->connection()); + stats_.cx_destroy_local_with_active_rq_.inc(); + } else { + ENVOY_CONN_LOG(debug, "remote close with active request", read_callbacks_->connection()); + stats_.cx_destroy_remote_with_active_rq_.inc(); + } + + (it++)->second->onReset(); + } +} + +void ConnectionManager::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { + read_callbacks_ = &callbacks; + + read_callbacks_->connection().addConnectionCallbacks(*this); + read_callbacks_->connection().enableHalfClose(true); +} + +void 
ConnectionManager::onEvent(Network::ConnectionEvent event) { + ENVOY_CONN_LOG(info, "received event {}", read_callbacks_->connection(), event); + resetAllTrans(event == Network::ConnectionEvent::LocalClose); +} + +DecoderEventHandler& ConnectionManager::newDecoderEventHandler(MessageMetadataSharedPtr metadata) { + ENVOY_LOG(trace, "new decoder filter"); + std::string&& k = std::string(metadata->transactionId().value()); + if (metadata->methodType() == MethodType::Ack) { + if (transactions_.find(k) != transactions_.end()) { + // ACK_4XX + return *transactions_.at(k); + } + } + + ActiveTransPtr new_trans = std::make_unique(*this, metadata); + new_trans->createFilterChain(); + transactions_.emplace(k, std::move(new_trans)); + + return *transactions_.at(k); +} + +bool ConnectionManager::ResponseDecoder::onData(MessageMetadataSharedPtr metadata) { + metadata_ = metadata; + if (auto status = transportBegin(metadata_); status == FilterStatus::StopIteration) { + return true; + } + + if (auto status = messageBegin(metadata_); status == FilterStatus::StopIteration) { + return true; + } + + if (auto status = messageEnd(); status == FilterStatus::StopIteration) { + return true; + } + + if (auto status = transportEnd(); status == FilterStatus::StopIteration) { + return true; + } + + return true; +} + +FilterStatus ConnectionManager::ResponseDecoder::messageBegin(MessageMetadataSharedPtr metadata) { + UNREFERENCED_PARAMETER(metadata); + return FilterStatus::Continue; +} + +FilterStatus ConnectionManager::ResponseDecoder::messageEnd() { return FilterStatus::Continue; } + +FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { + ASSERT(metadata_ != nullptr); + + ConnectionManager& cm = parent_.parent_; + + if (cm.read_callbacks_->connection().state() == Network::Connection::State::Closed) { + throw EnvoyException("downstream connection is closed"); + } + + Buffer::OwnedImpl buffer; + + metadata_->setEP(getLocalIp()); + std::shared_ptr encoder = std::make_shared(); + 
encoder->encode(metadata_, buffer); + + ENVOY_LOG(trace, "send response {}\n{}", buffer.length(), buffer.toString()); + cm.read_callbacks_->connection().write(buffer, false); + + cm.stats_.response_.inc(); + + return FilterStatus::Continue; +} + +FilterStatus ConnectionManager::ActiveTrans::applyDecoderFilters(ActiveTransDecoderFilter* filter) { + ASSERT(filter_action_ != nullptr); + + if (!local_response_sent_) { + std::list::iterator entry; + if (!filter) { + entry = decoder_filters_.begin(); + } else { + entry = std::next(filter->entry()); + } + + for (; entry != decoder_filters_.end(); entry++) { + const FilterStatus status = filter_action_((*entry)->handle_.get()); + if (local_response_sent_) { + // The filter called sendLocalReply: stop processing filters and return + // FilterStatus::Continue irrespective of the current result. + break; + } + + if (status != FilterStatus::Continue) { + return status; + } + } + } + + filter_action_ = nullptr; + filter_context_.reset(); + + return FilterStatus::Continue; +} + +FilterStatus ConnectionManager::ActiveTrans::transportBegin(MessageMetadataSharedPtr metadata) { + filter_context_ = metadata; + filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus { + MessageMetadataSharedPtr metadata = absl::any_cast(filter_context_); + return filter->transportBegin(metadata); + }; + + return applyDecoderFilters(nullptr); +} + +FilterStatus ConnectionManager::ActiveTrans::transportEnd() { + ASSERT(metadata_ != nullptr); + parent_.stats_.request_.inc(); + + FilterStatus status; + filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { + return filter->transportEnd(); + }; + + status = applyDecoderFilters(nullptr); + if (status == FilterStatus::StopIteration) { + return status; + } + + finalizeRequest(); + + return status; +} + +void ConnectionManager::ActiveTrans::finalizeRequest() {} + +FilterStatus ConnectionManager::ActiveTrans::messageBegin(MessageMetadataSharedPtr metadata) { + metadata_ = metadata; + 
filter_context_ = metadata; + filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus { + MessageMetadataSharedPtr metadata = absl::any_cast(filter_context_); + return filter->messageBegin(metadata); + }; + + return applyDecoderFilters(nullptr); +} + +FilterStatus ConnectionManager::ActiveTrans::messageEnd() { + filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->messageEnd(); }; + return applyDecoderFilters(nullptr); +} + +void ConnectionManager::ActiveTrans::createFilterChain() { + parent_.config_.filterFactory().createFilterChain(*this); +} + +void ConnectionManager::ActiveTrans::onReset() { parent_.doDeferredTransDestroy(*this); } + +void ConnectionManager::ActiveTrans::onError(const std::string& what) { + if (metadata_) { + sendLocalReply(AppException(AppExceptionType::ProtocolError, what), true); + return; + } + + parent_.doDeferredTransDestroy(*this); + parent_.read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); +} + +const Network::Connection* ConnectionManager::ActiveTrans::connection() const { + return &parent_.read_callbacks_->connection(); +} + +Router::RouteConstSharedPtr ConnectionManager::ActiveTrans::route() { + if (!cached_route_) { + if (metadata_ != nullptr) { + Router::RouteConstSharedPtr route = parent_.config_.routerConfig().route(*metadata_); + cached_route_ = std::move(route); + } else { + cached_route_ = nullptr; + } + } + + return cached_route_.value(); +} + +void ConnectionManager::ActiveTrans::sendLocalReply(const DirectResponse& response, + bool end_stream) { + parent_.sendLocalReply(*metadata_, response, end_stream); + + if (end_stream) { + return; + } + + // Consume any remaining request data from the downstream. 
+ local_response_sent_ = true; +} + +void ConnectionManager::ActiveTrans::startUpstreamResponse() { + response_decoder_ = std::make_unique(*this); +} + +SipFilters::ResponseStatus +ConnectionManager::ActiveTrans::upstreamData(MessageMetadataSharedPtr metadata) { + ASSERT(response_decoder_ != nullptr); + + try { + if (response_decoder_->onData(metadata)) { + // Completed upstream response. + // parent_.doDeferredRpcDestroy(*this); + return SipFilters::ResponseStatus::Complete; + } + return SipFilters::ResponseStatus::MoreData; + } catch (const AppException& ex) { + ENVOY_LOG(error, "sip response application error: {}", ex.what()); + // parent_.stats_.response_decoding_error_.inc(); + + sendLocalReply(ex, true); + return SipFilters::ResponseStatus::Reset; + } catch (const EnvoyException& ex) { + ENVOY_CONN_LOG(error, "sip response error: {}", parent_.read_callbacks_->connection(), + ex.what()); + // parent_.stats_.response_decoding_error_.inc(); + + onError(ex.what()); + return SipFilters::ResponseStatus::Reset; + } +} + +void ConnectionManager::ActiveTrans::resetDownstreamConnection() { + parent_.read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/conn_manager.h b/contrib/sip_proxy/filters/network/source/conn_manager.h new file mode 100644 index 0000000000000..212efe605ff0b --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/conn_manager.h @@ -0,0 +1,282 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/common/random_generator.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/common/linked_object.h" +#include "source/common/common/logger.h" +#include 
"source/common/stats/timespan_impl.h" +#include "source/common/stream_info/stream_info_impl.h" + +#include "absl/types/any.h" +#include "contrib/sip_proxy/filters/network/source/decoder.h" +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" +#include "contrib/sip_proxy/filters/network/source/protocol.h" +#include "contrib/sip_proxy/filters/network/source/stats.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +/** + * Config is a configuration interface for ConnectionManager. + */ +class SipSettings; +class Config { +public: + virtual ~Config() = default; + + virtual SipFilters::FilterChainFactory& filterFactory() PURE; + virtual SipFilterStats& stats() PURE; + virtual Router::Config& routerConfig() PURE; + virtual std::shared_ptr settings() PURE; +}; + +/** + * Extends Upstream::ProtocolOptionsConfig with Sip-specific cluster options. + */ +class ProtocolOptionsConfig : public Upstream::ProtocolOptionsConfig { +public: + ~ProtocolOptionsConfig() override = default; + + virtual bool sessionAffinity() const PURE; + virtual bool registrationAffinity() const PURE; +}; + +/** + * ConnectionManager is a Network::Filter that will perform Sip request handling on a connection. 
+ */ +class ConnectionManager : public Network::ReadFilter, + public Network::ConnectionCallbacks, + public DecoderCallbacks, + Logger::Loggable { +public: + ConnectionManager(Config& config, Random::RandomGenerator& random_generator, + TimeSource& time_system, + std::shared_ptr transaction_infos); + ~ConnectionManager() override; + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; } + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // DecoderCallbacks + DecoderEventHandler& newDecoderEventHandler(MessageMetadataSharedPtr metadata) override; + + absl::string_view getLocalIp() override { + // should return local address ip + // But after ORIGINAL_DEST, the local address update to upstream local address + // So here get downstream remote IP, which should in same pod car with envoy + ENVOY_LOG(debug, "Local ip: {}", + read_callbacks_->connection() + .connectionInfoProvider() + .localAddress() + ->ip() + ->addressAsString()); + return read_callbacks_->connection() + .connectionInfoProvider() + .localAddress() + ->ip() + ->addressAsString(); + } + + std::string getOwnDomain() override { return config_.settings()->ownDomain(); } + + std::string getDomainMatchParamName() override { + return config_.settings()->domainMatchParamName(); + } + +private: + friend class SipConnectionManagerTest; + struct ActiveTrans; + + struct ResponseDecoder : public DecoderCallbacks, public DecoderEventHandler { + ResponseDecoder(ActiveTrans& parent) : parent_(parent) {} + + bool onData(MessageMetadataSharedPtr metadata); + + // DecoderEventHandler + FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; + 
FilterStatus messageEnd() override; + FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override { + UNREFERENCED_PARAMETER(metadata); + return FilterStatus::Continue; + } + FilterStatus transportEnd() override; + + // DecoderCallbacks + DecoderEventHandler& newDecoderEventHandler(MessageMetadataSharedPtr metadata) override { + UNREFERENCED_PARAMETER(metadata); + return *this; + } + + absl::string_view getLocalIp() override { + // should return local address ip + // But after ORIGINAL_DEST, the local address update to upstream local address + // So here get downstream remote IP, which should in same pod car with envoy + return parent_.parent_.getLocalIp(); + } + + std::string getOwnDomain() override { return parent_.parent_.getOwnDomain(); } + + std::string getDomainMatchParamName() override { + return parent_.parent_.getDomainMatchParamName(); + } + + ActiveTrans& parent_; + MessageMetadataSharedPtr metadata_; + }; + using ResponseDecoderPtr = std::unique_ptr; + + // Wraps a DecoderFilter and acts as the DecoderFilterCallbacks for the filter, enabling filter + // chain continuation. 
+ struct ActiveTransDecoderFilter : public SipFilters::DecoderFilterCallbacks, + LinkedObject { + ActiveTransDecoderFilter(ActiveTrans& parent, SipFilters::DecoderFilterSharedPtr filter) + : parent_(parent), handle_(filter) {} + + // SipFilters::DecoderFilterCallbacks + uint64_t streamId() const override { return parent_.streamId(); } + std::string transactionId() const override { return parent_.transactionId(); } + const Network::Connection* connection() const override { return parent_.connection(); } + Router::RouteConstSharedPtr route() override { return parent_.route(); } + void sendLocalReply(const DirectResponse& response, bool end_stream) override { + parent_.sendLocalReply(response, end_stream); + } + void startUpstreamResponse() override { parent_.startUpstreamResponse(); } + SipFilters::ResponseStatus upstreamData(MessageMetadataSharedPtr metadata) override { + return parent_.upstreamData(metadata); + } + void resetDownstreamConnection() override { parent_.resetDownstreamConnection(); } + StreamInfo::StreamInfo& streamInfo() override { return parent_.streamInfo(); } + std::shared_ptr transactionInfos() override { + return parent_.transactionInfos(); + } + std::shared_ptr settings() override { return parent_.settings(); } + void onReset() override { return parent_.onReset(); } + + ActiveTrans& parent_; + SipFilters::DecoderFilterSharedPtr handle_; + }; + using ActiveTransDecoderFilterPtr = std::unique_ptr; + + // ActiveTrans tracks request/response pairs. 
+ struct ActiveTrans : LinkedObject, + public Event::DeferredDeletable, + public DecoderEventHandler, + public SipFilters::DecoderFilterCallbacks, + public SipFilters::FilterChainFactoryCallbacks { + ActiveTrans(ConnectionManager& parent, MessageMetadataSharedPtr metadata) + : parent_(parent), request_timer_(new Stats::HistogramCompletableTimespanImpl( + parent_.stats_.request_time_ms_, parent_.time_source_)), + stream_id_(parent_.random_generator_.random()), + transaction_id_(metadata->transactionId().value()), + stream_info_(parent_.time_source_, + parent_.read_callbacks_->connection().connectionInfoProviderSharedPtr()), + metadata_(metadata), local_response_sent_(false) { + parent_.stats_.request_active_.inc(); + } + ~ActiveTrans() override { + request_timer_->complete(); + ENVOY_LOG(trace, "destruct activetrans {}", transaction_id_); + parent_.stats_.request_active_.dec(); + + for (auto& filter : decoder_filters_) { + filter->handle_->onDestroy(); + } + } + + // DecoderEventHandler + FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus transportEnd() override; + FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus messageEnd() override; + + // SipFilters::DecoderFilterCallbacks + uint64_t streamId() const override { return stream_id_; } + std::string transactionId() const override { return transaction_id_; } + const Network::Connection* connection() const override; + Router::RouteConstSharedPtr route() override; + void sendLocalReply(const DirectResponse& response, bool end_stream) override; + void startUpstreamResponse() override; + SipFilters::ResponseStatus upstreamData(MessageMetadataSharedPtr metadata) override; + void resetDownstreamConnection() override; + StreamInfo::StreamInfo& streamInfo() override { return stream_info_; } + + std::shared_ptr transactionInfos() override { + return parent_.transaction_infos_; + } + std::shared_ptr settings() override { return 
parent_.config_.settings(); } + void onReset() override; + + // Sip::FilterChainFactoryCallbacks + void addDecoderFilter(SipFilters::DecoderFilterSharedPtr filter) override { + ActiveTransDecoderFilterPtr wrapper = + std::make_unique(*this, filter); + filter->setDecoderFilterCallbacks(*wrapper); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); + } + + FilterStatus applyDecoderFilters(ActiveTransDecoderFilter* filter); + void finalizeRequest(); + + void createFilterChain(); + void onError(const std::string& what); + + ConnectionManager& parent_; + Stats::TimespanPtr request_timer_; + uint64_t stream_id_; + std::string transaction_id_; + StreamInfo::StreamInfoImpl stream_info_; + MessageMetadataSharedPtr metadata_; + std::list decoder_filters_; + ResponseDecoderPtr response_decoder_; + absl::optional cached_route_; + std::function filter_action_; + + absl::any filter_context_; + bool local_response_sent_ : 1; + + /* Used by Router */ + std::shared_ptr transaction_infos_; + }; + + using ActiveTransPtr = std::unique_ptr; + + void dispatch(); + void sendLocalReply(MessageMetadata& metadata, const DirectResponse& response, bool end_stream); + void doDeferredTransDestroy(ActiveTrans& trans); + void resetAllTrans(bool local_reset); + + Config& config_; + SipFilterStats& stats_; + + Network::ReadFilterCallbacks* read_callbacks_{}; + + DecoderPtr decoder_; + absl::flat_hash_map transactions_; + Buffer::OwnedImpl request_buffer_; + Random::RandomGenerator& random_generator_; + TimeSource& time_source_; + + // This is used in Router, put here to pass to Router + std::shared_ptr transaction_infos_; + std::shared_ptr sip_settings_; +}; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/conn_state.h b/contrib/sip_proxy/filters/network/source/conn_state.h new file mode 100644 index 0000000000000..8b1ac41609719 --- /dev/null +++ 
b/contrib/sip_proxy/filters/network/source/conn_state.h @@ -0,0 +1,47 @@ +#pragma once + +#include "envoy/tcp/conn_pool.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +enum class ConnectionState { + NotConnected, + Connecting, + Connected, +}; + +/** Not used + * SipConnectionState tracks sip-related connection state for pooled + * connections. + */ +// class SipConnectionState : public Tcp::ConnectionPool::ConnectionState { +// public: +// SipConnectionState(SipProxy::ConnectionState state, int32_t initial_sequence_id = 0) +// : state_(state), next_sequence_id_(initial_sequence_id) {} +// +// /** +// * @return int32_t the next Sip sequence id to use for this connection. +// */ +// int32_t nextSequenceId() { +// if (next_sequence_id_ == std::numeric_limits::max()) { +// next_sequence_id_ = 0; +// return std::numeric_limits::max(); +// } +// +// return next_sequence_id_++; +// } +// +// SipProxy::ConnectionState state() { return state_; } +// +// private: +// SipProxy::ConnectionState state_; +// int32_t next_sequence_id_; +//}; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/decoder.cc b/contrib/sip_proxy/filters/network/source/decoder.cc new file mode 100644 index 0000000000000..28f2b661ade1d --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/decoder.cc @@ -0,0 +1,614 @@ +#include "contrib/sip_proxy/filters/network/source/decoder.h" + +#include "envoy/buffer/buffer.h" +#include "envoy/common/exception.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/common/assert.h" +#include "source/common/common/macros.h" + +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "re2/re2.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +DecoderStateMachine::DecoderStatus 
DecoderStateMachine::transportBegin() { + return {State::MessageBegin, handler_.transportBegin(metadata_)}; +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::messageBegin() { + return {State::MessageEnd, handler_.messageBegin(metadata_)}; +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::messageEnd() { + return {State::TransportEnd, handler_.messageEnd()}; +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::transportEnd() { + return {State::Done, handler_.transportEnd()}; +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::handleState() { + switch (state_) { + case State::TransportBegin: + return transportBegin(); + case State::MessageBegin: + return messageBegin(); + case State::MessageEnd: + return messageEnd(); + case State::TransportEnd: + return transportEnd(); + default: + /* test failed report "panic: not reached" if reach here */ + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +State DecoderStateMachine::run() { + while (state_ != State::Done) { + ENVOY_LOG(trace, "sip: state {}", StateNameValues::name(state_)); + + DecoderStatus s = handleState(); + + state_ = s.next_state_; + + ASSERT(s.filter_status_.has_value()); + if (s.filter_status_.value() == FilterStatus::StopIteration) { + return State::StopIteration; + } + } + + return state_; +} + +Decoder::Decoder(DecoderCallbacks& callbacks) : callbacks_(callbacks) {} + +void Decoder::complete() { + request_.reset(); + metadata_.reset(); + state_machine_ = nullptr; + start_new_message_ = true; + + current_header_ = HeaderType::TopLine; + raw_offset_ = 0; + + first_via_ = true; + first_route_ = true; +} + +FilterStatus Decoder::onData(Buffer::Instance& data) { + ENVOY_LOG(debug, "sip: {} bytes available", data.length()); + + reassemble(data); + return FilterStatus::StopIteration; +} + +int Decoder::reassemble(Buffer::Instance& data) { + // ENVOY_LOG(trace, "received --> {}\n{}", data.length(), data.toString()); + + Buffer::Instance& remaining_data = data; + + int ret = 0; + 
size_t clen = 0; // Content-Length value + size_t full_msg_len = 0; // Length of the entire message + + while (remaining_data.length() != 0) { + ssize_t content_pos = remaining_data.search("\n\r\n", strlen("\n\r\n"), 0); + if (content_pos != -1) { + // Get the Content-Length header value so that we can find + // out the full message length. + // + content_pos += 3; // move to the line after the CRLF line. + + ssize_t content_length_start = + remaining_data.search("Content-Length:", strlen("Content-Length:"), 0, content_pos); + if (content_length_start == -1) { + break; + } + + ssize_t content_length_end = remaining_data.search( + "\r\n", strlen("\r\n"), content_length_start + strlen("Content-Length:"), content_pos); + /* The "\n\r\n" is always included in remaining_data, so could not return -1 + if (content_length_end == -1) { + break; + } + */ + + char len[10]{}; // temporary storage + remaining_data.copyOut(content_length_start + strlen("Content-Length:"), + content_length_end - content_length_start - strlen("Content-Length:"), + len); + + clen = std::atoi(len); + + // Fail if Content-Length is less then zero + // + /* atoi return value >= 0, could not < 0 + if (clen < static_cast(0)) { + break; + } + */ + + full_msg_len = content_pos + clen; + } + + // Check for partial message received. + // + if ((full_msg_len == 0) || (full_msg_len > remaining_data.length())) { + break; + } else { + // We have a full SIP message; put it on the dispatch queue. 
+ // + Buffer::OwnedImpl message{}; + message.move(remaining_data, full_msg_len); + /* status not used + auto status = onDataReady(message); + */ + onDataReady(message); + message.drain(message.length()); + full_msg_len = 0; + /* no handle for this if + if (status != FilterStatus::StopIteration) { + // break; + }*/ + } + } // End of while (remaining_data_len > 0) + + return ret; +} + +FilterStatus Decoder::onDataReady(Buffer::Instance& data) { + ENVOY_LOG(trace, "onDataReady {}\n{}", data.length(), data.toString()); + + metadata_ = std::make_shared(data.toString()); + + decode(); + + request_ = std::make_unique(callbacks_.newDecoderEventHandler(metadata_)); + state_machine_ = std::make_unique(metadata_, request_->handler_); + State rv = state_machine_->run(); + + if (rv == State::Done || rv == State::StopIteration) { + complete(); + } + + return FilterStatus::StopIteration; +} + +auto Decoder::sipHeaderType(absl::string_view sip_line) { + static std::map sip_header_type_map{ + {"Call-ID", HeaderType::CallId}, + {"Via", HeaderType::Via}, + {"To", HeaderType::To}, + {"From", HeaderType::From}, + {"Contact", HeaderType::Contact}, + {"Record-Route", HeaderType::RRoute}, + {"CSeq", HeaderType::Cseq}, + {"Route", HeaderType::Route}, + {"Path", HeaderType::Path}, + {"Event", HeaderType::Event}, + {"Service-Route", HeaderType::SRoute}, + {"WWW-Authenticate", HeaderType::WAuth}, + {"Authorization", HeaderType::Auth}, + {"", HeaderType::Other}}; + + auto header_type_str = sip_line.substr(0, sip_line.find_first_of(':')); + if (auto result = sip_header_type_map.find(header_type_str); + result != sip_header_type_map.end()) { + return std::tuple{ + result->second, sip_line.substr(sip_line.find_first_of(':') + strlen(": "))}; + } else { + return std::tuple{ + HeaderType::Other, sip_line.substr(sip_line.find_first_of(':') + strlen(": "))}; + } +} + +MsgType Decoder::sipMsgType(absl::string_view top_line) { + if (top_line.find("SIP/2.0 ") == absl::string_view::npos) { + return 
MsgType::Request; + } else { + return MsgType::Response; + } +} + +MethodType Decoder::sipMethod(absl::string_view top_line) { + if (top_line.find("INVITE") != absl::string_view::npos) { + return MethodType::Invite; + } else if (top_line.find("CANCEL") != absl::string_view::npos) { + return MethodType::Cancel; + } else if (top_line.find("REGISTER") != absl::string_view::npos) { + return MethodType::Register; + } else if (top_line.find("REFER") != absl::string_view::npos) { + return MethodType::Refer; + } else if (top_line.find("UPDATE") != absl::string_view::npos) { + return MethodType::Update; + } else if (top_line.find("SUBSCRIBE") != absl::string_view::npos) { + return MethodType::Subscribe; + } else if (top_line.find("NOTIFY") != absl::string_view::npos) { + return MethodType::Notify; + } else if (top_line.find("ACK") != absl::string_view::npos) { + return MethodType::Ack; + } else if (top_line.find("BYE") != absl::string_view::npos) { + return MethodType::Bye; + } else if (top_line.find("2.0 200") != absl::string_view::npos) { + return MethodType::Ok200; + } else if (top_line.find("2.0 4") != absl::string_view::npos) { + return MethodType::Failure4xx; + } else { + return MethodType::NullMethod; + } +} + +Decoder::HeaderHandler::HeaderHandler(MessageHandler& parent) + : parent_(parent), header_processors_{ + {HeaderType::Via, &HeaderHandler::processVia}, + {HeaderType::Route, &HeaderHandler::processRoute}, + {HeaderType::Contact, &HeaderHandler::processContact}, + {HeaderType::Cseq, &HeaderHandler::processCseq}, + {HeaderType::RRoute, &HeaderHandler::processRecordRoute}, + {HeaderType::SRoute, &HeaderHandler::processServiceRoute}, + {HeaderType::WAuth, &HeaderHandler::processWwwAuth}, + {HeaderType::Auth, &HeaderHandler::processAuth}, + } {} + +int Decoder::HeaderHandler::processPath(absl::string_view& header) { + metadata()->deleteInstipOperation(rawOffset(), header); + metadata()->addEPOperation(rawOffset(), header, parent_.parent_.getOwnDomain(), + 
parent_.parent_.getDomainMatchParamName()); + return 0; +} + +int Decoder::HeaderHandler::processRoute(absl::string_view& header) { + if (!isFirstRoute()) { + return 0; + } + setFirstRoute(false); + + if (auto loc = header.find(";ep="); loc != absl::string_view::npos) { + // No "" of ep string + auto start = loc + strlen(";ep="); + if (auto end = header.find_first_of(";>", start); end != absl::string_view::npos) { + metadata()->setRouteEP(header.substr(start, end - start)); + } + } + + metadata()->setTopRoute(header); + metadata()->setDomain(header, parent_.parent_.getDomainMatchParamName()); + return 0; +} + +int Decoder::HeaderHandler::processRecordRoute(absl::string_view& header) { + if (!isFirstRecordRoute()) { + return 0; + } + + setFirstRecordRoute(false); + + metadata()->addEPOperation(rawOffset(), header, parent_.parent_.getOwnDomain(), + parent_.parent_.getDomainMatchParamName()); + return 0; +} + +int Decoder::HeaderHandler::processWwwAuth(absl::string_view& header) { + metadata()->addOpaqueOperation(rawOffset(), header); + return 0; +} + +int Decoder::HeaderHandler::processAuth(absl::string_view& header) { + auto loc = header.find(", opaque="); + if (loc == absl::string_view::npos) { + return 0; + } + // has "" + auto start = loc + strlen(", opaque=\""); + auto end = header.find("\"", start); + if (end == absl::string_view::npos) { + return 0; + } + + metadata()->setRouteOpaque(header.substr(start, end - start - 1)); + return 0; +} + +// +// 200 OK Header Handler +// +int Decoder::OK200HeaderHandler::processCseq(absl::string_view& header) { + if (header.find("INVITE") != absl::string_view::npos) { + metadata()->setRespMethodType(MethodType::Invite); + } else { + /* need to set a value, else when processRecordRoute, + *(metadata()->respMethodType() != MethodType::Invite) always false + * TODO: need to handle non-invite 200OK + */ + metadata()->setRespMethodType(MethodType::NullMethod); + } + return 0; +} + +int 
Decoder::HeaderHandler::processContact(absl::string_view& header) { + metadata()->deleteInstipOperation(rawOffset(), header); + metadata()->addEPOperation(rawOffset(), header, parent_.parent_.getOwnDomain(), + parent_.parent_.getDomainMatchParamName()); + + return 0; +} + +int Decoder::HeaderHandler::processServiceRoute(absl::string_view& header) { + if (!isFirstServiceRoute()) { + return 0; + } + setFirstServiceRoute(false); + + metadata()->addEPOperation(rawOffset(), header, parent_.parent_.getOwnDomain(), + parent_.parent_.getDomainMatchParamName()); + return 0; +} + +// +// SUBSCRIBE Header Handler +// +int Decoder::SUBSCRIBEHeaderHandler::processEvent(absl::string_view& header) { + auto& parent = dynamic_cast(this->parent_); + parent.setEventType(StringUtil::trim(header.substr(header.find("Event:") + strlen("Event:")))); + return 0; +} + +void Decoder::REGISTERHandler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Route: + handler_->processRoute(header); + break; + case HeaderType::Via: + handler_->processVia(header); + break; + case HeaderType::Contact: + handler_->processContact(header); + break; + case HeaderType::Path: + handler_->processPath(header); + break; + case HeaderType::RRoute: + handler_->processRecordRoute(header); + break; + case HeaderType::Auth: + handler_->processAuth(header); + break; + default: + break; + } +} + +void Decoder::INVITEHandler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Via: + handler_->processVia(header); + break; + case HeaderType::Route: + handler_->processRoute(header); + break; + case HeaderType::RRoute: + handler_->processRecordRoute(header); + break; + case HeaderType::Contact: + handler_->processContact(header); + break; + default: + break; + } +} + +void Decoder::OK200Handler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Cseq: + handler_->processCseq(header); + 
break; + case HeaderType::Contact: + handler_->processContact(header); + break; + case HeaderType::RRoute: + handler_->processRecordRoute(header); + break; + case HeaderType::Via: + handler_->processVia(header); + break; + case HeaderType::Path: + handler_->processPath(header); + break; + case HeaderType::SRoute: + handler_->processServiceRoute(header); + break; + default: + break; + } +} + +void Decoder::GeneralHandler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Route: + handler_->processRoute(header); + break; + case HeaderType::Via: + handler_->processVia(header); + break; + case HeaderType::Contact: + handler_->processContact(header); + break; + case HeaderType::Path: + handler_->processPath(header); + break; + case HeaderType::RRoute: + handler_->processRecordRoute(header); + break; + default: + break; + } +} + +void Decoder::SUBSCRIBEHandler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Event: + handler_->processEvent(header); + break; + case HeaderType::Route: + handler_->processRoute(header); + break; + case HeaderType::Via: + handler_->processVia(header); + break; + case HeaderType::Contact: + handler_->processContact(header); + break; + case HeaderType::RRoute: + handler_->processRecordRoute(header); + break; + default: + break; + } +} + +void Decoder::FAILURE4XXHandler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Contact: + handler_->processContact(header); + break; + case HeaderType::WAuth: + handler_->processWwwAuth(header); + break; + case HeaderType::Via: + handler_->processVia(header); + break; + default: + break; + } +} + +void Decoder::OthersHandler::parseHeader(HeaderType& type, absl::string_view& header) { + switch (type) { + case HeaderType::Via: + handler_->processVia(header); + break; + case HeaderType::Contact: + handler_->processContact(header); + break; + case HeaderType::Path: + 
handler_->processPath(header); + break; + case HeaderType::RRoute: + handler_->processRecordRoute(header); + break; + case HeaderType::SRoute: + handler_->processServiceRoute(header); + break; + default: + break; + } +} + +std::shared_ptr Decoder::MessageFactory::create(MethodType type, + Decoder& parent) { + switch (type) { + case MethodType::Invite: + return std::make_shared(parent); + case MethodType::Ok200: + return std::make_shared(parent); + case MethodType::Register: + return std::make_shared(parent); + case MethodType::Subscribe: + return std::make_shared(parent); + case MethodType::Failure4xx: + return std::make_shared(parent); + case MethodType::Ack: + case MethodType::Bye: + case MethodType::Cancel: + return std::make_shared(parent); + default: + return std::make_shared(parent); + } +} + +int Decoder::decode() { + auto& metadata = metadata_; + absl::string_view msg = absl::string_view(metadata->rawMsg()); + + std::shared_ptr handler; + + while (!msg.empty()) { + std::string::size_type crlf = msg.find("\r\n"); + // After message reassemble, this condition could not be true + // if (crlf == absl::string_view::npos) { + // break; + // } + + if (current_header_ == HeaderType::TopLine) { + // Sip Request Line + absl::string_view sip_line = msg.substr(0, crlf); + + parseTopLine(sip_line); + current_header_ = HeaderType::Other; + + handler = MessageFactory::create(metadata->methodType(), *this); + } else { + // Normal Header Line + absl::string_view sip_line = msg.substr(0, crlf); + auto [current_header, header_value] = sipHeaderType(sip_line); + this->current_header_ = current_header; + handler->parseHeader(current_header, sip_line); + } + + msg = msg.substr(crlf + strlen("\r\n")); + raw_offset_ += crlf + strlen("\r\n"); + +#if __cplusplus > 201703L + if (msg.starts_with("\r\n")) { +#else + if (msg[0] == '\r' && msg[1] == '\n') { +#endif + break; + } + } + + if (!metadata->topRoute().has_value() && metadata->msgType() == MsgType::Request) { + 
metadata->setDomain(metadata->requestURI().value(), getDomainMatchParamName()); + } + return 0; +} + +int Decoder::HeaderHandler::processVia(absl::string_view& header) { + if (!isFirstVia()) { + return 0; + } + + metadata()->setTransactionId(header); + + setFirstVia(false); + return 0; +} + +int Decoder::parseTopLine(absl::string_view& top_line) { + auto metadata = metadata_; + metadata->setMsgType(sipMsgType(top_line)); + metadata->setMethodType(sipMethod(top_line)); + + if (metadata->msgType() == MsgType::Request) { + metadata->setRequestURI(top_line); + } + + if (auto loc = top_line.find(";ep="); loc != absl::string_view::npos) { + // Need to exclude the "" of ep string + auto start = loc + strlen(";ep="); + + if (auto end = top_line.find_first_of("; ", start); end != absl::string_view::npos) { + metadata->setRouteEP(top_line.substr(start, end - start)); + } + } + return 0; +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/decoder.h b/contrib/sip_proxy/filters/network/source/decoder.h new file mode 100644 index 0000000000000..035f32dd261c6 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/decoder.h @@ -0,0 +1,392 @@ +#pragma once + +#include "envoy/buffer/buffer.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/common/assert.h" +#include "source/common/common/logger.h" + +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" +#include "contrib/sip_proxy/filters/network/source/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +#define ALL_PROTOCOL_STATES(FUNCTION) \ + FUNCTION(StopIteration) \ + FUNCTION(WaitForData) \ + FUNCTION(TransportBegin) \ + FUNCTION(MessageBegin) \ + FUNCTION(MessageEnd) \ + FUNCTION(TransportEnd) \ + FUNCTION(Done) + +/** + * ProtocolState represents a set of states used in a state machine to decode + * Sip 
requests and responses. + */ +enum class State { ALL_PROTOCOL_STATES(GENERATE_ENUM) }; + +class StateNameValues { +public: + static const std::string& name(State state) { + size_t i = static_cast(state); + ASSERT(i < names().size()); + return names()[i]; + } + +private: + static const std::vector& names() { + CONSTRUCT_ON_FIRST_USE(std::vector, {ALL_PROTOCOL_STATES(GENERATE_STRING)}); + } +}; + +/** + * DecoderStateMachine is the Sip message state machine + */ +class DecoderStateMachine : public Logger::Loggable { +public: + DecoderStateMachine(MessageMetadataSharedPtr& metadata, DecoderEventHandler& handler) + : metadata_(metadata), handler_(handler), state_(State::TransportBegin) {} + + /** + * Consumes as much data from the configured Buffer as possible and executes + * the decoding state machine. Returns ProtocolState::WaitForData if more data + * is required to complete processing of a message. Returns + * ProtocolState::Done when the end of a message is successfully processed. + * Once the Done state is reached, further invocations of run return + * immediately with Done. + * + * @param buffer a buffer containing the remaining data to be processed + * @return ProtocolState returns with ProtocolState::WaitForData or + * ProtocolState::Done + * @throw Envoy Exception if thrown by the underlying Protocol + */ + State run(); + + /** + * @return the current ProtocolState + */ + State currentState() const { return state_; } + + /** + * Set the current state. Used for testing only. + */ + void setCurrentState(State state) { state_ = state; } + +private: + friend class SipDecoderTest; + struct DecoderStatus { + DecoderStatus(State next_state) : next_state_(next_state){}; + DecoderStatus(State next_state, FilterStatus filter_status) + : next_state_(next_state), filter_status_(filter_status){}; + + State next_state_; + absl::optional filter_status_; + }; + + // These functions map directly to the matching ProtocolState values. 
Each + // returns the next state or ProtocolState::WaitForData if more data is + // required. + DecoderStatus transportBegin(); + DecoderStatus messageBegin(); + DecoderStatus messageEnd(); + DecoderStatus transportEnd(); + + // handleState delegates to the appropriate method based on state_. + DecoderStatus handleState(); + + MessageMetadataSharedPtr metadata_; + DecoderEventHandler& handler_; + State state_; +}; + +using DecoderStateMachinePtr = std::unique_ptr; + +class DecoderCallbacks { +public: + virtual ~DecoderCallbacks() = default; + + /** + * @return DecoderEventHandler& a new DecoderEventHandler for a message. + */ + virtual DecoderEventHandler& newDecoderEventHandler(MessageMetadataSharedPtr metadata) PURE; + virtual absl::string_view getLocalIp() PURE; + virtual std::string getOwnDomain() PURE; + virtual std::string getDomainMatchParamName() PURE; +}; + +/** + * Decoder encapsulates a configured Transport and Protocol and provides the + * ability to decode Sip messages. + */ +class Decoder : public Logger::Loggable { +public: + Decoder(DecoderCallbacks& callbacks); + + /** + * Drains data from the given buffer while executing a state machine over the + * data. + * + * @param data a Buffer containing Sip protocol data + * @return FilterStatus::StopIteration when waiting for filter continuation, + * Continue otherwise. 
+ * @throw EnvoyException on Sip protocol errors + */ + FilterStatus onData(Buffer::Instance& data); + std::string getOwnDomain() { return callbacks_.getOwnDomain(); } + std::string getDomainMatchParamName() { return callbacks_.getDomainMatchParamName(); } + +protected: + MessageMetadataSharedPtr metadata() { return metadata_; } + +private: + friend class SipConnectionManagerTest; + friend class SipDecoderTest; + struct ActiveRequest { + ActiveRequest(DecoderEventHandler& handler) : handler_(handler) {} + + DecoderEventHandler& handler_; + }; + using ActiveRequestPtr = std::unique_ptr; + + void complete(); + + int reassemble(Buffer::Instance& data); + + /** + * After the data reassembled, parse the data and handle them + * @param data string + * @param length actual length of data, data.length() may less + * than length when other data after data. + */ + FilterStatus onDataReady(Buffer::Instance& data); + + int decode(); + + HeaderType currentHeader() { return current_header_; } + size_t rawOffset() { return raw_offset_; } + void setCurrentHeader(HeaderType data) { current_header_ = data; } + + bool isFirstVia() { return first_via_; } + void setFirstVia(bool flag) { first_via_ = flag; } + bool isFirstRoute() { return first_route_; } + void setFirstRoute(bool flag) { first_route_ = flag; } + bool isFirstRecordRoute() { return first_record_route_; } + void setFirstRecordRoute(bool flag) { first_record_route_ = flag; } + bool isFirstServiceRoute() { return first_service_route_; } + void setFirstServiceRoute(bool flag) { first_service_route_ = flag; } + + auto sipHeaderType(absl::string_view sip_line); + MsgType sipMsgType(absl::string_view top_line); + MethodType sipMethod(absl::string_view top_line); + + int parseTopLine(absl::string_view& top_line); + + HeaderType current_header_{HeaderType::TopLine}; + size_t raw_offset_{0}; + + bool first_via_{true}; + bool first_route_{true}; + bool first_record_route_{true}; + bool first_service_route_{true}; + + class 
MessageHandler; + class HeaderHandler { + public: + HeaderHandler(MessageHandler& parent); + virtual ~HeaderHandler() = default; + + using HeaderProcessor = + absl::flat_hash_map>; + + virtual int processVia(absl::string_view& header); + virtual int processContact(absl::string_view& header); + virtual int processPath(absl::string_view& header); + virtual int processEvent(absl::string_view& header) { + UNREFERENCED_PARAMETER(header); + return 0; + }; + virtual int processRoute(absl::string_view& header); + virtual int processCseq(absl::string_view& header) { + UNREFERENCED_PARAMETER(header); + return 0; + } + virtual int processRecordRoute(absl::string_view& header); + virtual int processServiceRoute(absl::string_view& header); + virtual int processWwwAuth(absl::string_view& header); + virtual int processAuth(absl::string_view& header); + + MessageMetadataSharedPtr metadata() { return parent_.metadata(); } + + HeaderType currentHeader() { return parent_.currentHeader(); } + size_t rawOffset() { return parent_.rawOffset(); } + bool isFirstVia() { return parent_.isFirstVia(); } + bool isFirstRoute() { return parent_.isFirstRoute(); } + bool isFirstRecordRoute() { return parent_.isFirstRecordRoute(); } + bool isFirstServiceRoute() { return parent_.isFirstServiceRoute(); } + void setFirstVia(bool flag) { parent_.setFirstVia(flag); } + void setFirstRoute(bool flag) { parent_.setFirstRoute(flag); } + void setFirstRecordRoute(bool flag) { parent_.setFirstRecordRoute(flag); } + void setFirstServiceRoute(bool flag) { parent_.setFirstServiceRoute(flag); } + + MessageHandler& parent_; + HeaderProcessor header_processors_; + }; + + class MessageHandler { + public: + MessageHandler(std::shared_ptr handler, Decoder& parent) + : parent_(parent), handler_(std::move(handler)) {} + virtual ~MessageHandler() = default; + + virtual void parseHeader(HeaderType& type, absl::string_view& header) PURE; + + MessageMetadataSharedPtr metadata() { return parent_.metadata(); } + HeaderType 
currentHeader() { return parent_.currentHeader(); } + size_t rawOffset() { return parent_.rawOffset(); } + bool isFirstVia() { return parent_.isFirstVia(); } + bool isFirstRoute() { return parent_.isFirstRoute(); } + bool isFirstRecordRoute() { return parent_.isFirstRecordRoute(); } + bool isFirstServiceRoute() { return parent_.isFirstServiceRoute(); } + void setFirstVia(bool flag) { parent_.setFirstVia(flag); } + void setFirstRoute(bool flag) { parent_.setFirstRoute(flag); } + void setFirstRecordRoute(bool flag) { parent_.setFirstRecordRoute(flag); } + void setFirstServiceRoute(bool flag) { parent_.setFirstServiceRoute(flag); } + + Decoder& parent_; + + protected: + std::shared_ptr handler_; + // Decoder& parent_; + }; + + class REGISTERHeaderHandler : public HeaderHandler { + public: + using HeaderHandler::HeaderHandler; + }; + + class INVITEHeaderHandler : public HeaderHandler { + public: + using HeaderHandler::HeaderHandler; + }; + + class OK200HeaderHandler : public HeaderHandler { + public: + using HeaderHandler::HeaderHandler; + int processCseq(absl::string_view& header) override; + }; + + class GeneralHeaderHandler : public HeaderHandler { + public: + using HeaderHandler::HeaderHandler; + }; + + class SUBSCRIBEHeaderHandler : public HeaderHandler { + public: + using HeaderHandler::HeaderHandler; + int processEvent(absl::string_view& header) override; + }; + + class FAILURE4XXHeaderHandler : public HeaderHandler { + public: + using HeaderHandler::HeaderHandler; + }; + + class REGISTERHandler : public MessageHandler { + public: + REGISTERHandler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~REGISTERHandler() override = default; + + void parseHeader(HeaderType& type, absl::string_view& header) override; + }; + + class INVITEHandler : public MessageHandler { + public: + INVITEHandler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~INVITEHandler() override = default; + + void parseHeader(HeaderType& type, 
absl::string_view& header) override; + }; + + class OK200Handler : public MessageHandler { + public: + OK200Handler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~OK200Handler() override = default; + + void parseHeader(HeaderType& type, absl::string_view& header) override; + }; + + // This is used to handle ACK/BYE/CANCEL + class GeneralHandler : public MessageHandler { + public: + GeneralHandler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~GeneralHandler() override = default; + + void parseHeader(HeaderType& type, absl::string_view& header) override; + }; + + class SUBSCRIBEHandler : public MessageHandler { + public: + SUBSCRIBEHandler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~SUBSCRIBEHandler() override = default; + void parseHeader(HeaderType& type, absl::string_view& header) override; + void setEventType(absl::string_view value) { + if (value == "reg") { + event_type_ = EventType::REG; + } else { + event_type_ = EventType::OTHERS; + } + } + + private: + enum class EventType { REG, OTHERS }; + + EventType event_type_; + }; + + // This is used to handle Other Message + class OthersHandler : public MessageHandler { + public: + OthersHandler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~OthersHandler() override = default; + + void parseHeader(HeaderType& type, absl::string_view& header) override; + }; + + class FAILURE4XXHandler : public MessageHandler { + public: + FAILURE4XXHandler(Decoder& parent) + : MessageHandler(std::make_shared(*this), parent) {} + ~FAILURE4XXHandler() override = default; + + void parseHeader(HeaderType& type, absl::string_view& header) override; + }; + + class MessageFactory { + public: + static std::shared_ptr create(MethodType type, Decoder& parent); + }; + + DecoderCallbacks& callbacks_; + ActiveRequestPtr request_; + MessageMetadataSharedPtr metadata_; + DecoderStateMachinePtr state_machine_; + bool 
start_new_message_{true}; +}; + +using DecoderPtr = std::unique_ptr; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/decoder_events.h b/contrib/sip_proxy/filters/network/source/decoder_events.h new file mode 100644 index 0000000000000..625b97322e7e4 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/decoder_events.h @@ -0,0 +1,55 @@ +#pragma once + +#include "contrib/sip_proxy/filters/network/source/metadata.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +enum class FilterStatus { + // Continue filter chain iteration. + Continue, + + // Stop iterating over filters in the filter chain. + StopIteration +}; + +class DecoderEventHandler { +public: + virtual ~DecoderEventHandler() = default; + + /** + * Indicates the start of a Sip transport frame was detected. Unframed transports generate + * simulated start messages. + * @param metadata MessageMetadataSharedPtr describing as much as is currently known about the + * message + */ + virtual FilterStatus transportBegin(MessageMetadataSharedPtr metadata) PURE; + + /** + * Indicates the end of a Sip transport frame was detected. Unframed transport generate + * simulated complete messages. + */ + virtual FilterStatus transportEnd() PURE; + + /** + * Indicates that the start of a Sip protocol message was detected. + * @param metadata MessageMetadataSharedPtr describing the message + * @return FilterStatus to indicate if filter chain iteration should continue + */ + virtual FilterStatus messageBegin(MessageMetadataSharedPtr metadata) PURE; + + /** + * Indicates that the end of a Sip protocol message was detected. 
+ * @return FilterStatus to indicate if filter chain iteration should continue + */ + virtual FilterStatus messageEnd() PURE; +}; + +using DecoderEventHandlerSharedPtr = std::shared_ptr; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/encoder.cc b/contrib/sip_proxy/filters/network/source/encoder.cc new file mode 100644 index 0000000000000..1bc451d6e4b51 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/encoder.cc @@ -0,0 +1,65 @@ +#include "contrib/sip_proxy/filters/network/source/encoder.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +void EncoderImpl::encode(const MessageMetadataSharedPtr& metadata, Buffer::Instance& out) { + std::string output = ""; + std::string& raw_msg = metadata->rawMsg(); + std::sort(metadata->operationList().begin(), metadata->operationList().end()); + + size_t previous_position = 0; + for (auto& operation : metadata->operationList()) { + switch (operation.type_) { + case OperationType::Insert: { + std::string value = absl::get(operation.value_).value_; + if (value == ";ep=" || value == ",opaque=") { + if (metadata->ep().has_value() && metadata->ep().value().length() > 0) { + output += raw_msg.substr(previous_position, operation.position_ - previous_position); + previous_position = operation.position_; + + output += absl::get(operation.value_).value_; + if (value == ",opaque=") { + output += "\""; + } + output += std::string(metadata->ep().value()); + if (value == ",opaque=") { + output += "\""; + } + } + } else { + output += raw_msg.substr(previous_position, operation.position_ - previous_position); + previous_position = operation.position_; + + output += absl::get(operation.value_).value_; + } + break; + } + case OperationType::Modify: + output += raw_msg.substr(previous_position, operation.position_ - previous_position); + previous_position = operation.position_; 
+ + output += absl::get(operation.value_).dest_; + previous_position += absl::get(operation.value_).src_length_; + break; + case OperationType::Delete: + output += raw_msg.substr(previous_position, operation.position_ - previous_position); + previous_position = operation.position_; + + previous_position += absl::get(operation.value_).length_; + break; + default: + break; + } + } + + output += raw_msg.substr(previous_position); + out.add(output); +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/encoder.h b/contrib/sip_proxy/filters/network/source/encoder.h new file mode 100644 index 0000000000000..1cab707a54336 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/encoder.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/common/exception.h" + +#include "source/common/common/assert.h" +#include "source/common/common/logger.h" + +#include "contrib/sip_proxy/filters/network/source/metadata.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +class Encoder : public Logger::Loggable { +public: + virtual ~Encoder() = default; + virtual void encode(const MessageMetadataSharedPtr& metadata, Buffer::Instance& out) PURE; +}; + +class EncoderImpl : public Encoder { +public: + void encode(const MessageMetadataSharedPtr& metadata, Buffer::Instance& out) override; +}; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/filters/BUILD b/contrib/sip_proxy/filters/network/source/filters/BUILD new file mode 100644 index 0000000000000..a8e9b8d9b1e09 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/filters/BUILD @@ -0,0 +1,60 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 
+ +envoy_contrib_package() + +envoy_cc_library( + name = "filter_config_interface", + hdrs = ["filter_config.h"], + deps = [ + ":filter_interface", + "//envoy/config:typed_config_interface", + "//envoy/server:filter_config_interface", + "//source/common/common:macros", + "//source/common/protobuf:cc_wkt_protos", + ], +) + +envoy_cc_library( + name = "factory_base_lib", + hdrs = ["factory_base.h"], + deps = [ + ":filter_config_interface", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "filter_interface", + hdrs = ["filter.h"], + deps = [ + "//contrib/sip_proxy/filters/network/source:decoder_events_lib", + "//contrib/sip_proxy/filters/network/source:protocol_interface", + "//contrib/sip_proxy/filters/network/source:sip_lib", + "//contrib/sip_proxy/filters/network/source/router:router_interface", + "//envoy/buffer:buffer_interface", + "//envoy/network:connection_interface", + "//envoy/stream_info:stream_info_interface", + ], +) + +envoy_cc_library( + name = "well_known_names", + hdrs = ["well_known_names.h"], + deps = [ + "//source/common/singleton:const_singleton", + ], +) + +envoy_cc_library( + name = "pass_through_filter_lib", + hdrs = ["pass_through_filter.h"], + deps = [ + ":filter_interface", + ], +) diff --git a/contrib/sip_proxy/filters/network/source/filters/factory_base.h b/contrib/sip_proxy/filters/network/source/filters/factory_base.h new file mode 100644 index 0000000000000..05fd66503e70e --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/filters/factory_base.h @@ -0,0 +1,46 @@ +#pragma once + +#include "source/common/protobuf/utility.h" + +#include "contrib/sip_proxy/filters/network/source/filters/filter_config.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace SipFilters { + +template class FactoryBase : public NamedSipFilterConfigFactory { +public: + FilterFactoryCb + createFilterFactoryFromProto(const Protobuf::Message& proto_config, + const 
std::string& stats_prefix, + Server::Configuration::FactoryContext& context) override { + return createFilterFactoryFromProtoTyped(MessageUtil::downcastAndValidate( + proto_config, context.messageValidationVisitor()), + stats_prefix, context); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return name_; } + +protected: + FactoryBase(const std::string& name) : name_(name) {} + +private: + virtual FilterFactoryCb + createFilterFactoryFromProtoTyped(const ConfigProto& proto_config, + const std::string& stats_prefix, + Server::Configuration::FactoryContext& context) PURE; + + const std::string name_; +}; + +} // namespace SipFilters +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/filters/filter.h b/contrib/sip_proxy/filters/network/source/filters/filter.h new file mode 100644 index 0000000000000..fd19df1f282f3 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/filters/filter.h @@ -0,0 +1,166 @@ +#pragma once + +#include +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/network/connection.h" +#include "envoy/stream_info/stream_info.h" + +#include "contrib/sip_proxy/filters/network/source/decoder_events.h" +#include "contrib/sip_proxy/filters/network/source/protocol.h" +#include "contrib/sip_proxy/filters/network/source/router/router.h" +#include "contrib/sip_proxy/filters/network/source/sip.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace SipFilters { + +enum class ResponseStatus { + MoreData = 0, // The upstream response requires more data. + Complete = 1, // The upstream response is complete. + Reset = 2, // The upstream response is invalid and its connection must be reset. +}; + +/** + * Decoder filter callbacks add additional callbacks. 
+ */ +class DecoderFilterCallbacks { +public: + virtual ~DecoderFilterCallbacks() = default; + + /** + * @return uint64_t the ID of the originating stream for logging purposes. + */ + virtual uint64_t streamId() const PURE; + + /** + * @return string the ID of the transaction. + */ + virtual std::string transactionId() const PURE; + + /** + * @return const Network::Connection* the originating connection, or nullptr if there is none. + */ + virtual const Network::Connection* connection() const PURE; + + /** + * @return RouteConstSharedPtr the route for the current request. + */ + virtual Router::RouteConstSharedPtr route() PURE; + + /** + * Create a locally generated response using the provided response object. + * @param response DirectResponse the response to send to the downstream client + * @param end_stream if true, the downstream connection should be closed after this response + */ + virtual void sendLocalReply(const SipProxy::DirectResponse& response, bool end_stream) PURE; + + /** + * Indicates the start of an upstream response. May only be called once. + * @param transport the transport used by the upstream response + * @param protocol the protocol used by the upstream response + */ + virtual void startUpstreamResponse() PURE; + + /** + * Called with upstream response data. + * @param data supplies the upstream's data + * @return ResponseStatus indicating if the upstream response requires more data, is complete, + * or if an error occurred requiring the upstream connection to be reset. + */ + virtual ResponseStatus upstreamData(MessageMetadataSharedPtr metadata) PURE; + + /** + * Reset the downstream connection. + */ + virtual void resetDownstreamConnection() PURE; + + /** + * @return StreamInfo for logging purposes. + */ + virtual StreamInfo::StreamInfo& streamInfo() PURE; + + virtual std::shared_ptr transactionInfos() PURE; + virtual std::shared_ptr settings() PURE; + virtual void onReset() PURE; +}; + +/** + * Decoder filter interface. 
+ */ +class DecoderFilter : public virtual DecoderEventHandler { +public: + ~DecoderFilter() override = default; + + /** + * This routine is called prior to a filter being destroyed. This may happen after normal stream + * finish (both downstream and upstream) or due to reset. Every filter is responsible for making + * sure that any async events are cleaned up in the context of this routine. This includes timers, + * network calls, etc. The reason there is an onDestroy() method vs. doing this type of cleanup + * in the destructor is due to the deferred deletion model that Envoy uses to avoid stack unwind + * complications. Filters must not invoke either encoder or decoder filter callbacks after having + * onDestroy() invoked. + */ + virtual void onDestroy() PURE; + + /** + * Called by the connection manager once to initialize the filter decoder callbacks that the + * filter should use. Callbacks will not be invoked by the filter after onDestroy() is called. + */ + virtual void setDecoderFilterCallbacks(DecoderFilterCallbacks& callbacks) PURE; +}; + +using DecoderFilterSharedPtr = std::shared_ptr; + +/** + * These callbacks are provided by the connection manager to the factory so that the factory can + * build the filter chain in an application specific way. + */ +class FilterChainFactoryCallbacks { +public: + virtual ~FilterChainFactoryCallbacks() = default; + + /** + * Add a decoder filter that is used when reading connection data. + * @param filter supplies the filter to add. + */ + virtual void addDecoderFilter(DecoderFilterSharedPtr filter) PURE; +}; + +/** + * This function is used to wrap the creation of a Sip filter chain for new connections as they + * come in. Filter factories create the function at configuration initialization time, and then + * they are used at runtime. + * @param callbacks supplies the callbacks for the stream to install filters to. 
Typically the + * function will install a single filter, but it's technically possibly to install more than one + * if desired. + */ +using FilterFactoryCb = std::function; + +/** + * A FilterChainFactory is used by a connection manager to create a Sip level filter chain when + * a new connection is created. Typically it would be implemented by a configuration engine that + * would install a set of filters that are able to process an application scenario on top of a + * stream of Sip requests. + */ +class FilterChainFactory { +public: + virtual ~FilterChainFactory() = default; + + /** + * Called when a new Sip stream is created on the connection. + * @param callbacks supplies the "sink" that is used for actually creating the filter chain. @see + * FilterChainFactoryCallbacks. + */ + virtual void createFilterChain(FilterChainFactoryCallbacks& callbacks) PURE; +}; + +} // namespace SipFilters +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/filters/filter_config.h b/contrib/sip_proxy/filters/network/source/filters/filter_config.h new file mode 100644 index 0000000000000..1748556f5a0b3 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/filters/filter_config.h @@ -0,0 +1,45 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/server/filter_config.h" + +#include "source/common/common/macros.h" +#include "source/common/protobuf/protobuf.h" + +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace SipFilters { + +/** + * Implemented by each Sip filter and registered via Registry::registerFactory or the + * convenience class RegisterFactory. 
+ */ +class NamedSipFilterConfigFactory : public Envoy::Config::TypedFactory { +public: + ~NamedSipFilterConfigFactory() override = default; + + /** + * Create a particular sip filter factory implementation. If the implementation is unable to + * produce a factory with the provided parameters, it should throw an EnvoyException in the case + * of general error. The returned callback should always be initialized. + * @param config supplies the configuration for the filter + * @param stat_prefix prefix for stat logging + * @param context supplies the filter's context. + * @return FilterFactoryCb the factory creation function. + */ + virtual FilterFactoryCb + createFilterFactoryFromProto(const Protobuf::Message& config, const std::string& stat_prefix, + Server::Configuration::FactoryContext& context) PURE; + + std::string category() const override { return "envoy.sip_proxy.filters"; } +}; + +} // namespace SipFilters +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/filters/pass_through_filter.h b/contrib/sip_proxy/filters/network/source/filters/pass_through_filter.h new file mode 100644 index 0000000000000..bc8c6cc9c5370 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/filters/pass_through_filter.h @@ -0,0 +1,46 @@ +#pragma once + +#include "absl/strings/string_view.h" +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace SipFilters { + +/** + * Pass through Sip decoder filter. Continue at each decoding state within the series of + * transitions. 
+ */ +class PassThroughDecoderFilter : public DecoderFilter { +public: + // SipDecoderFilter + void onDestroy() override {} + + void setDecoderFilterCallbacks(DecoderFilterCallbacks& callbacks) override { + decoder_callbacks_ = &callbacks; + }; + + // Sip Decoder State Machine + SipProxy::FilterStatus transportBegin(SipProxy::MessageMetadataSharedPtr) override { + return SipProxy::FilterStatus::Continue; + } + + SipProxy::FilterStatus transportEnd() override { return SipProxy::FilterStatus::Continue; } + + SipProxy::FilterStatus messageBegin(SipProxy::MessageMetadataSharedPtr) override { + return SipProxy::FilterStatus::Continue; + } + + SipProxy::FilterStatus messageEnd() override { return SipProxy::FilterStatus::Continue; } + +protected: + DecoderFilterCallbacks* decoder_callbacks_{}; +}; + +} // namespace SipFilters +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/filters/well_known_names.h b/contrib/sip_proxy/filters/network/source/filters/well_known_names.h new file mode 100644 index 0000000000000..d1d712ea67a17 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/filters/well_known_names.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +#include "source/common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace SipFilters { + +/** + * Well-known http filter names. + * NOTE: New filters should use the well known name: envoy.filters.sip.name. 
+ */ +class SipFilterNameValues { +public: + // Router filter + const std::string ROUTER = "envoy.filters.sip.router"; +}; + +using SipFilterNames = ConstSingleton; + +} // namespace SipFilters + +const std::string SipProxy = "envoy.filters.network.sip_proxy"; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/metadata.h b/contrib/sip_proxy/filters/network/source/metadata.h new file mode 100644 index 0000000000000..e422385f1d70e --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/metadata.h @@ -0,0 +1,209 @@ +#pragma once + +#include +#include +#include +#include + +#include "source/common/common/assert.h" +#include "source/common/common/logger.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "contrib/sip_proxy/filters/network/source/operation.h" +#include "contrib/sip_proxy/filters/network/source/sip.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +/** + * MessageMetadata encapsulates metadata about Sip messages. The various fields are considered + * optional since they may come from either the transport or protocol in some cases. Unless + * otherwise noted, accessor methods throw absl::bad_optional_access if the corresponding value has + * not been set. 
+ */ +class MessageMetadata : public Logger::Loggable { +public: + MessageMetadata() = default; + MessageMetadata(std::string&& raw_msg) : raw_msg_(std::move(raw_msg)) {} + + MsgType msgType() { return msg_type_; } + MethodType methodType() { return method_type_; } + MethodType respMethodType() { return resp_method_type_; } + absl::optional ep() { return ep_; } + std::vector& operationList() { return operation_list_; } + absl::optional routeEP() { return route_ep_; } + absl::optional routeOpaque() { return route_opaque_; } + + absl::optional requestURI() { return request_uri_; } + absl::optional topRoute() { return top_route_; } + absl::optional domain() { return domain_; } + absl::optional transactionId() { return transaction_id_; } + absl::optional destination() { return destination_; } + + std::string& rawMsg() { return raw_msg_; } + + void setMsgType(MsgType data) { msg_type_ = data; } + void setMethodType(MethodType data) { method_type_ = data; } + void setRespMethodType(MethodType data) { resp_method_type_ = data; } + void setOperation(Operation op) { operation_list_.emplace_back(op); } + void setEP(absl::string_view data) { ep_ = data; } + void setRouteEP(absl::string_view data) { route_ep_ = data; } + void setRouteOpaque(absl::string_view data) { route_opaque_ = data; } + + void setRequestURI(absl::string_view data) { request_uri_ = data; } + void setTopRoute(absl::string_view data) { top_route_ = data; } + void setDomain(absl::string_view header, std::string domain_match_param_name) { + domain_ = getDomain(header, domain_match_param_name); + } + + // void addEPOperation(size_t raw_offset, absl::string_view& header, std::string& own_domain, + // std::string& domain_match_param_name) { + void addEPOperation(size_t raw_offset, absl::string_view& header, std::string own_domain, + std::string domain_match_param_name) { + ENVOY_LOG(debug, "header: {}\n own_domain: {}\n domain_match_param_name: {}", header, + own_domain, domain_match_param_name); + if 
(header.find(";ep=") != absl::string_view::npos) { + // already Contact have ep + return; + } + auto pos = header.find(">"); + if (pos == absl::string_view::npos) { + // no url + return; + } + + // Get domain + absl::string_view domain = getDomain(header, domain_match_param_name); + + // Compare the domain + if (domain != own_domain) { + ENVOY_LOG(debug, "header domain:{} not matches own_domain:{}", domain, own_domain); + return; + } + + setOperation(Operation(OperationType::Insert, raw_offset + pos, InsertOperationValue(";ep="))); + } + + void addOpaqueOperation(size_t raw_offset, absl::string_view& header) { + if (header.find("opaque=") != absl::string_view::npos) { + // already has opaque + return; + } + auto pos = header.length(); + setOperation( + Operation(OperationType::Insert, raw_offset + pos, InsertOperationValue(",opaque="))); + } + + void deleteInstipOperation(size_t raw_offset, absl::string_view& header) { + // Delete inst-ip and remove "sip:" in x-suri + if (auto pos = header.find(";inst-ip="); pos != absl::string_view::npos) { + setOperation( + Operation(OperationType::Delete, raw_offset + pos, + DeleteOperationValue( + header.substr(pos, header.find_first_of(";>", pos + 1) - pos).size()))); + auto xsuri = header.find("sip:pcsf-cfed"); + setOperation(Operation(OperationType::Delete, raw_offset + xsuri, DeleteOperationValue(4))); + } + } + + // input is the full SIP header + void setTransactionId(absl::string_view data) { + auto start_index = data.find("branch="); + if (start_index == absl::string_view::npos) { + return; + } + start_index += strlen("branch="); + + auto end_index = data.find_first_of(";>", start_index); + if (end_index == absl::string_view::npos) { + end_index = data.size(); + } + transaction_id_ = data.substr(start_index, end_index - start_index); + } + + void setDestination(absl::string_view destination) { destination_ = destination; } + /*only used in UT*/ + void resetTransactionId() { transaction_id_.reset(); } + +private: + 
MsgType msg_type_; + MethodType method_type_; + MethodType resp_method_type_; + std::vector operation_list_; + absl::optional ep_{}; + absl::optional pep_{}; + absl::optional route_ep_{}; + absl::optional route_opaque_{}; + + absl::optional request_uri_{}; + absl::optional top_route_{}; + absl::optional domain_{}; + absl::optional transaction_id_{}; + absl::optional destination_{}; + + std::string raw_msg_{}; + + absl::string_view getDomain(absl::string_view header, std::string domain_match_param_name) { + ENVOY_LOG(debug, "header: {}\ndomain_match_param_name: {}", header, domain_match_param_name); + + // Get domain + absl::string_view domain = ""; + + if (domain_match_param_name != "host") { + auto start = header.find(domain_match_param_name); + if (start == absl::string_view::npos) { + domain = ""; + } else { + // domain_match_param_name + "=" + // start = start + strlen(domain_match_param_name.c_str()) + strlen("=") ; + start = start + domain_match_param_name.length() + strlen("="); + if ("sip:" == header.substr(start, strlen("sip:"))) { + start += strlen("sip:"); + } + // end + auto end = header.find_first_of(":;>", start); + if (end == absl::string_view::npos) { + domain = ""; + } else { + domain = header.substr(start, end - start); + } + } + } + + // Still get host if mapped domain is empty + if (domain_match_param_name == "host" || domain == "") { + auto start = header.find("sip:"); + if (start == absl::string_view::npos) { + return ""; + } + start += strlen("sip:"); + auto end = header.find_first_of(":;>", start); + if (end == absl::string_view::npos) { + return ""; + } + + auto addr = header.substr(start, end - start); + + // Remove name in format of sip:name@addr:pos + auto pos = addr.find("@"); + if (pos == absl::string_view::npos) { + domain = header.substr(start, end - start); + } else { + pos += strlen("@"); + domain = addr.substr(pos, addr.length() - pos); + } + } + + return domain; + } +}; + +using MessageMetadataSharedPtr = std::shared_ptr; + +} // 
namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/operation.h b/contrib/sip_proxy/filters/network/source/operation.h new file mode 100644 index 0000000000000..6cadd12a19afb --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/operation.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "absl/types/variant.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +enum class OperationType { + Invalid = 0, + Insert = 1, + Delete = 2, + Modify = 3, + Query = 4, // not used yet. +}; + +struct InsertOperationValue { + InsertOperationValue(std::string&& value) : value_(value) {} + std::string value_; +}; +struct DeleteOperationValue { + DeleteOperationValue(size_t length) : length_(length) {} + size_t length_; +}; +struct ModifyOperationValue { + ModifyOperationValue(size_t src_length, std::string&& dest) + : src_length_(src_length), dest_(dest) {} + size_t src_length_; + std::string dest_; +}; + +class Operation { +public: + Operation(OperationType type, size_t position, + absl::variant value) + : type_(type), position_(position), value_(value) {} + + // constexpr bool operator<(const Operation& other) { return this->position_ < other.position_; } + // constexpr bool operator>(const Operation& other) { return this->position_ > other.position_; } + // constexpr bool operator==(const Operation& other) { return this->position_ == other.position_; + // } constexpr bool operator!=(const Operation& other) { return this->position_ != + // other.position_; } constexpr bool operator<=(const Operation& other) { return this->position_ + // <= other.position_; } constexpr bool operator>=(const Operation& other) { return + // this->position_ >= other.position_; } constexpr bool operator<=>(Operation &other) { return + // this->position_ <=> other.position_; } + + // private: + 
OperationType type_; + size_t position_; + absl::variant value_; +}; + +static constexpr bool operator<(const Operation& o1, const Operation& o2) { + return o1.position_ < o2.position_; +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/protocol.h b/contrib/sip_proxy/filters/network/source/protocol.h new file mode 100644 index 0000000000000..8a24effe302bb --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/protocol.h @@ -0,0 +1,60 @@ +#pragma once + +#include "envoy/buffer/buffer.h" + +#include "contrib/sip_proxy/filters/network/source/conn_state.h" +#include "contrib/sip_proxy/filters/network/source/metadata.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +class SipSettings { +public: + SipSettings(std::chrono::milliseconds transaction_timeout, std::string own_domain, + std::string domain_match_parameter_name) + : transaction_timeout_(transaction_timeout), own_domain_(own_domain), + domain_match_parameter_name_(domain_match_parameter_name) {} + std::chrono::milliseconds transactionTimeout() { return transaction_timeout_; } + std::string ownDomain() { return own_domain_; } + std::string domainMatchParamName() { return domain_match_parameter_name_; } + +private: + std::chrono::milliseconds transaction_timeout_; + std::string own_domain_; + std::string domain_match_parameter_name_; +}; + +/** + * A DirectResponse manipulates a Protocol to directly create a Sip response message. + */ +class DirectResponse { +public: + virtual ~DirectResponse() = default; + + enum class ResponseType { + // DirectResponse encodes MessageType::Reply with success payload + SuccessReply, + + // DirectResponse encodes MessageType::Reply with an exception payload + ErrorReply, + + // DirectResponse encodes MessageType::Exception + Exception, + }; + + /** + * Encodes the response via the given Protocol. 
+ * @param metadata the MessageMetadata for the request that generated this response + * @param proto the Protocol to be used for message encoding + * @param buffer the Buffer into which the message should be encoded + * @return ResponseType indicating whether the message is a successful or error reply or an + * exception + */ + virtual ResponseType encode(MessageMetadata& metadata, Buffer::Instance& buffer) const PURE; +}; +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/router/BUILD b/contrib/sip_proxy/filters/network/source/router/BUILD new file mode 100644 index 0000000000000..114b13328da6c --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/router/BUILD @@ -0,0 +1,58 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", + "envoy_cc_library", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_contrib_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":router_lib", + "//contrib/sip_proxy/filters/network/source/filters:factory_base_lib", + "//contrib/sip_proxy/filters/network/source/filters:filter_config_interface", + "//contrib/sip_proxy/filters/network/source/filters:well_known_names", + "//envoy/registry", + "@envoy_api//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "router_interface", + hdrs = ["router.h"], + external_deps = ["abseil_optional"], + deps = [ + "//contrib/sip_proxy/filters/network/source:metadata_lib", + "//envoy/router:router_interface", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = ["router_impl.cc"], + hdrs = ["router_impl.h"], + deps = [ + ":router_interface", + "//contrib/sip_proxy/filters/network/source:app_exception_lib", + "//contrib/sip_proxy/filters/network/source:conn_manager_lib", + 
"//contrib/sip_proxy/filters/network/source:encoder_lib", + "//contrib/sip_proxy/filters/network/source:protocol_interface", + "//contrib/sip_proxy/filters/network/source/filters:filter_interface", + "//contrib/sip_proxy/filters/network/source/filters:well_known_names", + "//envoy/tcp:conn_pool_interface", + "//envoy/upstream:cluster_manager_interface", + "//envoy/upstream:load_balancer_interface", + "//envoy/upstream:thread_local_cluster_interface", + "//source/common/common:logger_lib", + "//source/common/http:header_utility_lib", + "//source/common/router:metadatamatchcriteria_lib", + "//source/common/upstream:load_balancer_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg_cc_proto", + ], +) diff --git a/contrib/sip_proxy/filters/network/source/router/config.cc b/contrib/sip_proxy/filters/network/source/router/config.cc new file mode 100644 index 0000000000000..fb00f9735cb92 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/router/config.cc @@ -0,0 +1,35 @@ +#include "contrib/sip_proxy/filters/network/source/router/config.h" + +#include "envoy/registry/registry.h" + +#include "contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.pb.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.pb.validate.h" +#include "contrib/sip_proxy/filters/network/source/router/router_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace Router { + +SipFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::sip_proxy::router::v3alpha::Router& proto_config, + const std::string& stat_prefix, Server::Configuration::FactoryContext& context) { + UNREFERENCED_PARAMETER(proto_config); + + return [&context, stat_prefix](SipFilters::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addDecoderFilter( + std::make_shared(context.clusterManager(), stat_prefix, 
context.scope())); + }; +} + +/** + * Static registration for the router filter. @see RegisterFactory. + */ +REGISTER_FACTORY(RouterFilterConfig, SipFilters::NamedSipFilterConfigFactory); + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/router/config.h b/contrib/sip_proxy/filters/network/source/router/config.h new file mode 100644 index 0000000000000..95e1ff931017f --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/router/config.h @@ -0,0 +1,30 @@ +#pragma once + +#include "contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.pb.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.pb.validate.h" +#include "contrib/sip_proxy/filters/network/source/filters/factory_base.h" +#include "contrib/sip_proxy/filters/network/source/filters/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace Router { + +class RouterFilterConfig + : public SipFilters::FactoryBase< + envoy::extensions::filters::network::sip_proxy::router::v3alpha::Router> { +public: + RouterFilterConfig() : FactoryBase(SipFilters::SipFilterNames::get().ROUTER) {} + +private: + SipFilters::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::sip_proxy::router::v3alpha::Router& proto_config, + const std::string& stat_prefix, Server::Configuration::FactoryContext& context) override; +}; + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/router/router.h b/contrib/sip_proxy/filters/network/source/router/router.h new file mode 100644 index 0000000000000..d8a3b80ab3256 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/router/router.h @@ -0,0 +1,77 @@ +#pragma once + 
+#include +#include +#include + +#include "envoy/router/router.h" + +#include "contrib/sip_proxy/filters/network/source/metadata.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace Router { + +class TransactionInfo; +using TransactionInfos = std::map>; + +/** + * RouteEntry is an individual resolved route entry. + */ +class RouteEntry { +public: + virtual ~RouteEntry() = default; + + /** + * @return const std::string& the upstream cluster that owns the route. + */ + virtual const std::string& clusterName() const PURE; + + /** + * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when + * selecting an upstream host + */ + virtual const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE; +}; + +/** + * Route holds the RouteEntry for a request. + */ +class Route { +public: + virtual ~Route() = default; + + /** + * @return the route entry or nullptr if there is no matching route for the request. + */ + virtual const RouteEntry* routeEntry() const PURE; +}; + +using RouteConstSharedPtr = std::shared_ptr; + +/** + * The router configuration. + */ +class Config { +public: + virtual ~Config() = default; + + /** + * Based on the incoming Sip request transport and/or protocol data, determine the target + * route for the request. + * @param metadata MessageMetadata for the message to route + * @param random_value uint64_t used to select cluster affinity + * @return the route or nullptr if there is no matching route for the request. 
+ */ + virtual RouteConstSharedPtr route(MessageMetadata& metadata) const PURE; +}; + +using ConfigConstSharedPtr = std::shared_ptr; + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/router/router_impl.cc b/contrib/sip_proxy/filters/network/source/router/router_impl.cc new file mode 100644 index 0000000000000..e81dbabbe7860 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/router/router_impl.cc @@ -0,0 +1,487 @@ +#include "contrib/sip_proxy/filters/network/source/router/router_impl.h" + +#include + +#include "envoy/upstream/cluster_manager.h" + +#include "source/common/common/logger.h" +#include "source/common/common/utility.h" +#include "source/common/network/address_impl.h" +#include "source/common/router/metadatamatchcriteria_impl.h" + +#include "absl/strings/match.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.pb.h" +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "contrib/sip_proxy/filters/network/source/encoder.h" +#include "contrib/sip_proxy/filters/network/source/filters/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace Router { + +RouteEntryImplBase::RouteEntryImplBase( + const envoy::extensions::filters::network::sip_proxy::v3alpha::Route& route) + : cluster_name_(route.route().cluster()) {} + +const std::string& RouteEntryImplBase::clusterName() const { return cluster_name_; } + +const RouteEntry* RouteEntryImplBase::routeEntry() const { return this; } + +RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const MessageMetadata& metadata) const { + UNREFERENCED_PARAMETER(metadata); + return shared_from_this(); +} + +GeneralRouteEntryImpl::GeneralRouteEntryImpl( + const envoy::extensions::filters::network::sip_proxy::v3alpha::Route& route) + : RouteEntryImplBase(route), 
domain_(route.match().domain()) {} + +RouteConstSharedPtr GeneralRouteEntryImpl::matches(MessageMetadata& metadata) const { + bool matches = metadata.domain().value() == domain_ || domain_ == "*"; + + if (matches) { + return clusterEntry(metadata); + } + + return nullptr; +} + +RouteMatcher::RouteMatcher( + const envoy::extensions::filters::network::sip_proxy::v3alpha::RouteConfiguration& config) { + using envoy::extensions::filters::network::sip_proxy::v3alpha::RouteMatch; + + for (const auto& route : config.routes()) { + switch (route.match().match_specifier_case()) { + case RouteMatch::MatchSpecifierCase::kDomain: + routes_.emplace_back(new GeneralRouteEntryImpl(route)); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } +} + +RouteConstSharedPtr RouteMatcher::route(MessageMetadata& metadata) const { + for (const auto& route : routes_) { + RouteConstSharedPtr route_entry = route->matches(metadata); + if (nullptr != route_entry) { + return route_entry; + } + } + + return nullptr; +} + +void Router::onDestroy() { + if (!callbacks_->transactionId().empty()) { + for (auto& kv : *transaction_infos_) { + auto transaction_info = kv.second; + try { + transaction_info->getTransaction(callbacks_->transactionId()); + transaction_info->deleteTransaction(callbacks_->transactionId()); + } catch (std::out_of_range const&) { + } + } + } +} + +void Router::setDecoderFilterCallbacks(SipFilters::DecoderFilterCallbacks& callbacks) { + callbacks_ = &callbacks; + transaction_infos_ = callbacks_->transactionInfos(); + settings_ = callbacks_->settings(); +} + +FilterStatus Router::transportBegin(MessageMetadataSharedPtr metadata) { + UNREFERENCED_PARAMETER(metadata); + return FilterStatus::Continue; +} + +FilterStatus Router::transportEnd() { return FilterStatus::Continue; } + +FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) { + if (upstream_request_ != nullptr) { + return FilterStatus::Continue; + } + + metadata_ = metadata; + route_ = callbacks_->route(); 
+ if (!route_) { + ENVOY_STREAM_LOG(debug, "no route match domain {}", *callbacks_, metadata->domain().value()); + stats_.route_missing_.inc(); + callbacks_->sendLocalReply(AppException(AppExceptionType::UnknownMethod, "no route for method"), + true); + return FilterStatus::StopIteration; + } + + route_entry_ = route_->routeEntry(); + const std::string& cluster_name = route_entry_->clusterName(); + + Upstream::ThreadLocalCluster* cluster = cluster_manager_.getThreadLocalCluster(cluster_name); + if (!cluster) { + ENVOY_STREAM_LOG(debug, "unknown cluster '{}'", *callbacks_, cluster_name); + stats_.unknown_cluster_.inc(); + callbacks_->sendLocalReply(AppException(AppExceptionType::InternalError, + fmt::format("unknown cluster '{}'", cluster_name)), + true); + return FilterStatus::StopIteration; + } + + cluster_ = cluster->info(); + ENVOY_STREAM_LOG(debug, "cluster '{}' match domain {}", *callbacks_, cluster_name, + std::string(metadata->domain().value())); + + if (cluster_->maintenanceMode()) { + stats_.upstream_rq_maintenance_mode_.inc(); + callbacks_->sendLocalReply( + AppException(AppExceptionType::InternalError, + fmt::format("maintenance mode for cluster '{}'", cluster_name)), + true); + return FilterStatus::StopIteration; + } + + const std::shared_ptr options = + cluster_->extensionProtocolOptionsTyped(SipProxy); + + auto handle_affinity = [&](const std::shared_ptr options) { + if (options == nullptr || metadata->msgType() == MsgType::Response) { + return; + } + + if (metadata->methodType() != MethodType::Register && options->sessionAffinity()) { + if (metadata->routeEP().has_value()) { + auto host = metadata->routeEP().value(); + metadata->setDestination(host); + } + } + if (metadata->methodType() == MethodType::Register && options->registrationAffinity()) { + if (metadata->routeOpaque().has_value()) { + auto host = metadata->routeOpaque().value(); + metadata->setDestination(host); + } + } + }; + handle_affinity(options); + + auto& transaction_info = 
(*transaction_infos_)[cluster_name]; + + auto message_handler_with_loadbalancer = [&]() { + auto pool_data = cluster->tcpConnPool(Upstream::ResourcePriority::Default, this); + if (!pool_data) { + stats_.no_healthy_upstream_.inc(); + callbacks_->sendLocalReply( + AppException(AppExceptionType::InternalError, + fmt::format("no healthy upstream for '{}'", cluster_name)), + true); + return FilterStatus::StopIteration; + } + + ENVOY_STREAM_LOG(debug, "router decoding request", *callbacks_); + + Upstream::HostDescriptionConstSharedPtr host = pool_data->host(); + if (!host) { + return FilterStatus::StopIteration; + } + + if (auto upstream_request = + transaction_info->getUpstreamRequest(host->address()->ip()->addressAsString()); + upstream_request != nullptr) { + // There is action connection, reuse it. + upstream_request_ = upstream_request; + upstream_request_->setDecoderFilterCallbacks(*callbacks_); + ENVOY_STREAM_LOG(debug, "reuse upstream request", *callbacks_); + try { + transaction_info->getTransaction(std::string(metadata->transactionId().value())); + } catch (std::out_of_range const&) { + transaction_info->insertTransaction(std::string(metadata->transactionId().value()), + callbacks_, upstream_request_); + } + } else { + upstream_request_ = std::make_shared(*pool_data, transaction_info); + upstream_request_->setDecoderFilterCallbacks(*callbacks_); + transaction_info->insertUpstreamRequest(host->address()->ip()->addressAsString(), + upstream_request_); + ENVOY_STREAM_LOG(debug, "create new upstream request {}", *callbacks_, + host->address()->ip()->addressAsString()); + + try { + transaction_info->getTransaction(std::string(metadata->transactionId().value())); + } catch (std::out_of_range const&) { + transaction_info->insertTransaction(std::string(metadata->transactionId().value()), + callbacks_, upstream_request_); + } + } + return upstream_request_->start(); + }; + + if (metadata->destination().has_value()) { + auto host = metadata->destination().value(); + if 
(auto upstream_request = transaction_info->getUpstreamRequest(std::string(host)); + upstream_request != nullptr) { + // There is action connection, reuse it. + ENVOY_STREAM_LOG(debug, "reuse upstream request from EP {}", *callbacks_, host); + upstream_request_ = upstream_request; + + try { + transaction_info->getTransaction(std::string(metadata->transactionId().value())); + } catch (std::out_of_range const&) { + transaction_info->insertTransaction(std::string(metadata->transactionId().value()), + callbacks_, upstream_request_); + } + return upstream_request_->start(); + } else { + ENVOY_STREAM_LOG(debug, "get upstream request for {} failed.", *callbacks_, host); + message_handler_with_loadbalancer(); + } + } else { + ENVOY_STREAM_LOG(debug, "no destination.", *callbacks_); + message_handler_with_loadbalancer(); + } + + return FilterStatus::Continue; +} + +FilterStatus Router::messageEnd() { + // In case pool is not ready, save this into pending_request. + if (upstream_request_->connectionState() != ConnectionState::Connected) { + upstream_request_->addIntoPendingRequest(metadata_); + return FilterStatus::Continue; + } + + Buffer::OwnedImpl transport_buffer; + + // set EP/Opaque, used in upstream + metadata_->setEP(upstream_request_->getLocalIp()); + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, transport_buffer); + + ENVOY_STREAM_LOG(trace, "send buffer : {} bytes\n{}", *callbacks_, transport_buffer.length(), + transport_buffer.toString()); + + upstream_request_->write(transport_buffer, false); + return FilterStatus::Continue; +} + +const Network::Connection* Router::downstreamConnection() const { + if (callbacks_ != nullptr) { + return callbacks_->connection(); + } + + return nullptr; +} + +void Router::cleanup() { upstream_request_.reset(); } + +UpstreamRequest::UpstreamRequest(Upstream::TcpPoolData& pool_data, + std::shared_ptr transaction_info) + : conn_pool_data_(pool_data), transaction_info_(transaction_info), 
response_complete_(false) {} + +UpstreamRequest::~UpstreamRequest() { + if (conn_pool_handle_) { + conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + } +} + +FilterStatus UpstreamRequest::start() { + if (connectionState() != ConnectionState::NotConnected) { + return FilterStatus::Continue; + } + + ENVOY_LOG(info, "connecting {}", conn_pool_data_.host()->address()->asString()); + + setConnectionState(ConnectionState::Connecting); + conn_state_ = ConnectionState::Connecting; + + Tcp::ConnectionPool::Cancellable* handle = conn_pool_data_.newConnection(*this); + if (handle) { + // Pause while we wait for a connection. + conn_pool_handle_ = handle; + return FilterStatus::Continue; + } + + if (upstream_host_ == nullptr) { + return FilterStatus::StopIteration; + } + + return FilterStatus::Continue; +} + +void UpstreamRequest::releaseConnection(const bool close) { + if (conn_pool_handle_) { + conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + conn_pool_handle_ = nullptr; + } + + setConnectionState(ConnectionState::NotConnected); + + // The event triggered by close will also release this connection so clear conn_data_ before + // closing. + auto conn_data = std::move(conn_data_); + if (close && conn_data != nullptr) { + conn_data->connection().close(Network::ConnectionCloseType::NoFlush); + } +} + +void UpstreamRequest::resetStream() { releaseConnection(true); } + +void UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view, + Upstream::HostDescriptionConstSharedPtr host) { + ENVOY_LOG(info, "on pool failure"); + setConnectionState(ConnectionState::NotConnected); + conn_pool_handle_ = nullptr; + + // Mimic an upstream reset. 
+ onUpstreamHostSelected(host); + UNREFERENCED_PARAMETER(reason); +} + +void UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + ENVOY_STREAM_LOG(trace, "onPoolReady", *callbacks_); + + conn_data_ = std::move(conn_data); + + onUpstreamHostSelected(host); + conn_data_->addUpstreamCallbacks(*this); + conn_pool_handle_ = nullptr; + + setConnectionState(ConnectionState::Connected); + + onRequestStart(); +} + +void UpstreamRequest::onRequestStart() { + if (!pending_request_.empty()) { + for (const auto& metadata : pending_request_) { + Buffer::OwnedImpl transport_buffer; + + // set EP/Opaque, used in upstream + metadata->setEP(getLocalIp()); + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata, transport_buffer); + + ENVOY_STREAM_LOG(trace, "send buffer : {} bytes\n{}", *callbacks_, transport_buffer.length(), + transport_buffer.toString()); + conn_data_->connection().write(transport_buffer, false); + } + pending_request_.clear(); + } +} + +void UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) { + upstream_host_ = host; +} + +void UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { + switch (reason) { + case ConnectionPool::PoolFailureReason::Overflow: + callbacks_->sendLocalReply( + AppException(AppExceptionType::InternalError, "sip upstream request: too many connections"), + true); + break; + case ConnectionPool::PoolFailureReason::LocalConnectionFailure: + // Should only happen if we closed the connection, due to an error condition, in which case + // we've already handled any possible downstream response. 
+ callbacks_->resetDownstreamConnection(); + break; + case ConnectionPool::PoolFailureReason::RemoteConnectionFailure: + case ConnectionPool::PoolFailureReason::Timeout: + // TODO(zuercher): distinguish between these cases where appropriate (particularly timeout) + // if (!response_started_) { + // callbacks_->sendLocalReply( + // AppException( + // AppExceptionType::InternalError, + // fmt::format("connection failure '{}'", (upstream_host_ != nullptr) + // ? upstream_host_->address()->asString() + // : "to upstream")), + // true); + // return; + //} + + // Error occurred after a partial response, propagate the reset to the downstream. + callbacks_->resetDownstreamConnection(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +SipFilters::DecoderFilterCallbacks* UpstreamRequest::getTransaction(std::string&& transaction_id) { + try { + return transaction_info_->getTransaction(std::move(transaction_id)).activeTrans(); + } catch (std::out_of_range const&) { + return nullptr; + } +} + +// Tcp::ConnectionPool::UpstreamCallbacks +void UpstreamRequest::onUpstreamData(Buffer::Instance& data, bool end_stream) { + UNREFERENCED_PARAMETER(end_stream); + upstream_buffer_.move(data); + auto response_decoder_ = std::make_unique(*this); + response_decoder_->onData(upstream_buffer_); +} + +void UpstreamRequest::onEvent(Network::ConnectionEvent event) { + ENVOY_LOG(info, "received upstream event {}", event); + switch (event) { + case Network::ConnectionEvent::RemoteClose: + ENVOY_STREAM_LOG(debug, "upstream remote close", *callbacks_); + break; + case Network::ConnectionEvent::LocalClose: + ENVOY_STREAM_LOG(debug, "upstream local close", *callbacks_); + break; + default: + // Connected is consumed by the connection pool. 
+ NOT_REACHED_GCOVR_EXCL_LINE; + } + + releaseConnection(false); +} + +void UpstreamRequest::setDecoderFilterCallbacks(SipFilters::DecoderFilterCallbacks& callbacks) { + callbacks_ = &callbacks; +} + +bool ResponseDecoder::onData(Buffer::Instance& data) { + decoder_->onData(data); + return true; +} + +FilterStatus ResponseDecoder::transportBegin(MessageMetadataSharedPtr metadata) { + ENVOY_LOG(trace, "ResponseDecoder {}", metadata->rawMsg()); + if (metadata->transactionId().has_value()) { + auto transaction_id = metadata->transactionId().value(); + + auto active_trans = parent_.getTransaction(std::string(transaction_id)); + if (active_trans) { + active_trans->startUpstreamResponse(); + active_trans->upstreamData(metadata); + } else { + ENVOY_LOG(debug, "no active trans selected {}\n{}", transaction_id, metadata->rawMsg()); + return FilterStatus::StopIteration; + } + } else { + ENVOY_LOG(debug, "no active trans selected \n{}", metadata->rawMsg()); + return FilterStatus::StopIteration; + } + + return FilterStatus::Continue; +} + +absl::string_view ResponseDecoder::getLocalIp() { return parent_.getLocalIp(); } + +std::string ResponseDecoder::getOwnDomain() { return parent_.transactionInfo()->getOwnDomain(); } + +std::string ResponseDecoder::getDomainMatchParamName() { + return parent_.transactionInfo()->getDomainMatchParamName(); +} + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/router/router_impl.h b/contrib/sip_proxy/filters/network/source/router/router_impl.h new file mode 100644 index 0000000000000..231d94c7a64f4 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/router/router_impl.h @@ -0,0 +1,440 @@ +#pragma once + +#include +#include +#include + +#include "envoy/router/router.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/tcp/conn_pool.h" +#include 
"envoy/thread_local/thread_local.h" +#include "envoy/upstream/load_balancer.h" +#include "envoy/upstream/thread_local_cluster.h" + +#include "source/common/common/logger.h" +#include "source/common/http/header_utility.h" +#include "source/common/upstream/load_balancer_impl.h" + +#include "absl/types/optional.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.pb.h" +#include "contrib/sip_proxy/filters/network/source/conn_manager.h" +#include "contrib/sip_proxy/filters/network/source/decoder_events.h" +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" +#include "contrib/sip_proxy/filters/network/source/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace Router { + +class RouteEntryImplBase : public RouteEntry, + public Route, + public std::enable_shared_from_this { +public: + RouteEntryImplBase(const envoy::extensions::filters::network::sip_proxy::v3alpha::Route& route); + + // Router::RouteEntry + const std::string& clusterName() const override; + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override { + return metadata_match_criteria_.get(); + } + + // Router::Route + const RouteEntry* routeEntry() const override; + + virtual RouteConstSharedPtr matches(MessageMetadata& metadata) const PURE; + +protected: + RouteConstSharedPtr clusterEntry(const MessageMetadata& metadata) const; + bool headersMatch(const Http::HeaderMap& headers) const; + +private: + /* Not used + class DynamicRouteEntry : public RouteEntry, public Route { + public: + DynamicRouteEntry(const RouteEntryImplBase& parent, absl::string_view cluster_name) + : parent_(parent), cluster_name_(std::string(cluster_name)) {} + + // Router::RouteEntry + const std::string& clusterName() const override { return cluster_name_; } + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override { + return parent_.metadataMatchCriteria(); + } + + // 
Router::Route + const RouteEntry* routeEntry() const override { return this; } + + private: + const RouteEntryImplBase& parent_; + const std::string cluster_name_; + }; */ + + const std::string cluster_name_; + Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; +}; + +using RouteEntryImplBaseConstSharedPtr = std::shared_ptr; + +// match domain from route header or request_uri, this is the more general way +class GeneralRouteEntryImpl : public RouteEntryImplBase { +public: + GeneralRouteEntryImpl( + const envoy::extensions::filters::network::sip_proxy::v3alpha::Route& route); + + // RouteEntryImplBase + RouteConstSharedPtr matches(MessageMetadata& metadata) const override; + +private: + const std::string domain_; +}; + +class RouteMatcher { +public: + RouteMatcher(const envoy::extensions::filters::network::sip_proxy::v3alpha::RouteConfiguration&); + + RouteConstSharedPtr route(MessageMetadata& metadata) const; + +private: + std::vector routes_; +}; + +#define ALL_SIP_ROUTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(route_missing) \ + COUNTER(unknown_cluster) \ + COUNTER(upstream_rq_maintenance_mode) \ + COUNTER(no_healthy_upstream) + +struct RouterStats { + ALL_SIP_ROUTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) +}; + +class UpstreamRequest; +class TransactionInfoItem : public Logger::Loggable { +public: + TransactionInfoItem(SipFilters::DecoderFilterCallbacks* active_trans, + std::shared_ptr upstream_request) + : active_trans_(active_trans), upstream_request_(upstream_request) {} + + ~TransactionInfoItem() = default; + + void resetTrans() { active_trans_->onReset(); } + + void appendMessageList(std::shared_ptr message) { messages_.push_back(message); } + + SipFilters::DecoderFilterCallbacks* activeTrans() const { return active_trans_; } + std::shared_ptr upstreamRequest() const { return upstream_request_; } + + SystemTime timestamp() const { return this->active_trans_->streamInfo().startTime(); } + void 
toDelete() { deleted_ = true; } + bool deleted() { return deleted_; } + +private: + std::list> messages_; + SipFilters::DecoderFilterCallbacks* active_trans_; + std::shared_ptr upstream_request_; + std::chrono::system_clock::time_point timestamp_; + bool deleted_{false}; +}; + +struct ThreadLocalTransactionInfo : public ThreadLocal::ThreadLocalObject, + public Logger::Loggable { + ThreadLocalTransactionInfo(std::shared_ptr parent, Event::Dispatcher& dispatcher, + std::chrono::milliseconds transaction_timeout, std::string own_domain, + std::string domain_match_parameter_name) + : parent_(parent), dispatcher_(dispatcher), transaction_timeout_(transaction_timeout), + own_domain_(own_domain), domain_match_parameter_name_(domain_match_parameter_name) { + audit_timer_ = dispatcher.createTimer([this]() -> void { auditTimerAction(); }); + audit_timer_->enableTimer(std::chrono::seconds(2)); + } + absl::flat_hash_map> transaction_info_map_{}; + absl::flat_hash_map> upstream_request_map_{}; + + std::shared_ptr parent_; + Event::Dispatcher& dispatcher_; + Event::TimerPtr audit_timer_; + std::chrono::milliseconds transaction_timeout_; + std::string own_domain_; + std::string domain_match_parameter_name_; + + void auditTimerAction() { + const auto p1 = dispatcher_.timeSource().systemTime(); + for (auto it = transaction_info_map_.cbegin(); it != transaction_info_map_.cend();) { + if (it->second->deleted()) { + transaction_info_map_.erase(it++); + continue; + } + + auto diff = + std::chrono::duration_cast(p1 - it->second->timestamp()); + if (diff.count() >= transaction_timeout_.count()) { + it->second->resetTrans(); + // transaction_info_map_.erase(it++); + } + + ++it; + /* In single thread, this condition should be cover in line 160 + * And Envoy should be single thread + if (it->second->deleted()) { + transaction_info_map_.erase(it++); + } else { + ++it; + }*/ + } + audit_timer_->enableTimer(std::chrono::seconds(2)); + } +}; + +class TransactionInfo : public 
std::enable_shared_from_this, + Logger::Loggable { +public: + TransactionInfo(const std::string& cluster_name, ThreadLocal::SlotAllocator& tls, + std::chrono::milliseconds transaction_timeout, std::string own_domain, + std::string domain_match_parameter_name) + : cluster_name_(cluster_name), tls_(tls.allocateSlot()), + transaction_timeout_(transaction_timeout), own_domain_(own_domain), + domain_match_parameter_name_(domain_match_parameter_name) {} + + void init() { + // Note: `this` and `cluster_name` have a a lifetime of the filter. + // That may be shorter than the tls callback if the listener is torn down shortly after it is + // created. We use a weak pointer to make sure this object outlives the tls callbacks. + std::weak_ptr this_weak_ptr = this->shared_from_this(); + tls_->set( + [this_weak_ptr](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { + if (auto this_shared_ptr = this_weak_ptr.lock()) { + return std::make_shared( + this_shared_ptr, dispatcher, this_shared_ptr->transaction_timeout_, + this_shared_ptr->own_domain_, this_shared_ptr->domain_match_parameter_name_); + } + return nullptr; + }); + + (void)cluster_name_; + } + ~TransactionInfo() = default; + + void insertTransaction(std::string&& transaction_id, + SipFilters::DecoderFilterCallbacks* active_trans, + std::shared_ptr upstream_request) { + tls_->getTyped().transaction_info_map_.emplace(std::make_pair( + transaction_id, std::make_shared(active_trans, upstream_request))); + } + + void deleteTransaction(std::string&& transaction_id) { + tls_->getTyped() + .transaction_info_map_.at(transaction_id) + ->toDelete(); + } + + TransactionInfoItem& getTransaction(std::string&& transaction_id) { + return *(tls_->getTyped().transaction_info_map_.at(transaction_id)); + } + + void insertUpstreamRequest(const std::string& host, + std::shared_ptr upstream_request) { + tls_->getTyped().upstream_request_map_.emplace( + std::make_pair(host, upstream_request)); + } + + std::shared_ptr 
getUpstreamRequest(const std::string& host) { + try { + return tls_->getTyped().upstream_request_map_.at(host); + } catch (std::out_of_range) { + return nullptr; + } + } + + void deleteUpstreamRequest(const std::string& host) { + tls_->getTyped().upstream_request_map_.erase(host); + } + + std::string getOwnDomain() { return own_domain_; } + + std::string getDomainMatchParamName() { return domain_match_parameter_name_; } + +private: + const std::string cluster_name_; + ThreadLocal::SlotPtr tls_; + std::chrono::milliseconds transaction_timeout_; + std::string own_domain_; + std::string domain_match_parameter_name_; +}; + +class Router : public Upstream::LoadBalancerContextBase, + public virtual DecoderEventHandler, + public SipFilters::DecoderFilter, + Logger::Loggable { +public: + Router(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, + Stats::Scope& scope) + : cluster_manager_(cluster_manager), stats_(generateStats(stat_prefix, scope)) {} + + // SipFilters::DecoderFilter + void onDestroy() override; + void setDecoderFilterCallbacks(SipFilters::DecoderFilterCallbacks& callbacks) override; + + // DecoderEventHandler + FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus transportEnd() override; + FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus messageEnd() override; + + // Upstream::LoadBalancerContext + const Network::Connection* downstreamConnection() const override; + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override { + if (route_entry_) { + return route_entry_->metadataMatchCriteria(); + } + return nullptr; + } + + bool shouldSelectAnotherHost(const Upstream::Host& host) override { + if (!metadata_->destination().has_value()) { + return false; + } + return host.address()->ip()->addressAsString() != metadata_->destination().value(); + } + + RouterStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return 
RouterStats{ALL_SIP_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } + + void cleanup(); + + Upstream::ClusterManager& cluster_manager_; + RouterStats stats_; + + RouteConstSharedPtr route_{}; + const RouteEntry* route_entry_{}; + MessageMetadataSharedPtr metadata_{}; + + std::shared_ptr upstream_request_; + SipFilters::DecoderFilterCallbacks* callbacks_{}; + Upstream::ClusterInfoConstSharedPtr cluster_; + std::shared_ptr transaction_infos_{}; + std::shared_ptr settings_; +}; + +class ThreadLocalActiveConn; +class ResponseDecoder : public DecoderCallbacks, + public DecoderEventHandler, + public Logger::Loggable { +public: + ResponseDecoder(UpstreamRequest& parent) + : parent_(parent), decoder_(std::make_unique(*this)) {} + bool onData(Buffer::Instance& data); + + // DecoderEventHandler + FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override { + UNREFERENCED_PARAMETER(metadata); + return FilterStatus::Continue; + } + FilterStatus messageEnd() override { return FilterStatus::Continue; }; + FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus transportEnd() override { return FilterStatus::Continue; } + + // DecoderCallbacks + DecoderEventHandler& newDecoderEventHandler(MessageMetadataSharedPtr metadata) override { + UNREFERENCED_PARAMETER(metadata); + return *this; + } + absl::string_view getLocalIp() override; + std::string getOwnDomain() override; + std::string getDomainMatchParamName() override; + +private: + UpstreamRequest& parent_; + DecoderPtr decoder_; +}; + +using ResponseDecoderPtr = std::unique_ptr; + +class UpstreamRequest : public Tcp::ConnectionPool::Callbacks, + public Tcp::ConnectionPool::UpstreamCallbacks, + public std::enable_shared_from_this, + public Logger::Loggable { +public: + UpstreamRequest(Upstream::TcpPoolData& pool_data, + std::shared_ptr transaction_info); + ~UpstreamRequest() override; + FilterStatus 
start(); + void resetStream(); + void releaseConnection(bool close); + + SipFilters::DecoderFilterCallbacks* getTransaction(std::string&& transaction_id); + + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) override; + + void onRequestStart(); + void onRequestComplete(); + void onResponseComplete(); + void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host); + void onResetStream(ConnectionPool::PoolFailureReason reason); + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + void setDecoderFilterCallbacks(SipFilters::DecoderFilterCallbacks& callbacks); + + void addIntoPendingRequest(MessageMetadataSharedPtr metadata) { + if (pending_request_.size() < 1000000) { + pending_request_.push_back(metadata); + } else { + ENVOY_LOG(warn, "pending request is full, drop this request. 
size {} request {}", + pending_request_.size(), metadata->rawMsg()); + } + } + + ConnectionState connectionState() { return conn_state_; } + void setConnectionState(ConnectionState state) { conn_state_ = state; } + void write(Buffer::Instance& data, bool end_stream) { + return conn_data_->connection().write(data, end_stream); + } + + absl::string_view getLocalIp() { + ENVOY_LOG( + debug, "Local ip: {}", + conn_data_->connection().connectionInfoProvider().localAddress()->ip()->addressAsString()); + return conn_data_->connection() + .connectionInfoProvider() + .localAddress() + ->ip() + ->addressAsString(); + } + + std::shared_ptr transactionInfo() { return transaction_info_; } + +private: + Upstream::TcpPoolData& conn_pool_data_; + + Tcp::ConnectionPool::Cancellable* conn_pool_handle_{}; + Tcp::ConnectionPool::ConnectionDataPtr conn_data_; + Upstream::HostDescriptionConstSharedPtr upstream_host_; + ConnectionState conn_state_{ConnectionState::NotConnected}; + + std::shared_ptr transaction_info_; + SipFilters::DecoderFilterCallbacks* callbacks_{}; + std::list pending_request_; + Buffer::OwnedImpl upstream_buffer_; + + bool request_complete_ : 1; + bool response_complete_ : 1; +}; + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/sip.h b/contrib/sip_proxy/filters/network/source/sip.h new file mode 100644 index 0000000000000..c678573825318 --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/sip.h @@ -0,0 +1,68 @@ +#pragma once + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +enum class HeaderType { + TopLine, + CallId, + Via, + To, + From, + Route, + Contact, + RRoute, + Cseq, + Path, + Event, + SRoute, + WAuth, + Auth, + Other, + InvalidFormat +}; + +enum class MsgType { Request, Response, ErrorMsg }; + +enum class MethodType { + Invite, + Register, + Update, + Refer, + 
Subscribe, + Notify, + Ack, + Bye, + Cancel, + Ok200, + Failure4xx, + NullMethod +}; + +enum class AppExceptionType { + Unknown = 0, + UnknownMethod = 1, + InvalidMessageType = 2, + WrongMethodName = 3, + BadSequenceId = 4, + MissingResult = 5, + InternalError = 6, + ProtocolError = 7, + InvalidTransform = 8, + InvalidProtocol = 9, + // FBThrift values. + // See https://github.com/facebook/fbthrift/blob/master/thrift/lib/cpp/TApplicationException.h#L52 + UnsupportedClientType = 10, + LoadShedding = 11, + Timeout = 12, + InjectedFailure = 13, + ChecksumMismatch = 14, + Interruption = 15, +}; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/source/stats.h b/contrib/sip_proxy/filters/network/source/stats.h new file mode 100644 index 0000000000000..7077d933ff79b --- /dev/null +++ b/contrib/sip_proxy/filters/network/source/stats.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +/** + * All sip filter stats. @see stats_macros.h + */ +#define ALL_SIP_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(cx_destroy_local_with_active_rq) \ + COUNTER(cx_destroy_remote_with_active_rq) \ + COUNTER(request) \ + COUNTER(response) \ + COUNTER(response_error) \ + COUNTER(response_exception) \ + COUNTER(response_reply) \ + COUNTER(response_success) \ + GAUGE(request_active, Accumulate) \ + HISTOGRAM(request_time_ms, Milliseconds) + +/** + * Struct definition for all sip proxy stats. 
@see stats_macros.h + */ +struct SipFilterStats { + ALL_SIP_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) + + static SipFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return SipFilterStats{ALL_SIP_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } +}; + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/BUILD b/contrib/sip_proxy/filters/network/test/BUILD new file mode 100644 index 0000000000000..f78a9b9bdafea --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/BUILD @@ -0,0 +1,120 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_mock( + name = "mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//contrib/sip_proxy/filters/network/source:conn_manager_lib", + "//contrib/sip_proxy/filters/network/source:protocol_interface", + "//contrib/sip_proxy/filters/network/source/filters:factory_base_lib", + "//contrib/sip_proxy/filters/network/source/filters:filter_interface", + "//contrib/sip_proxy/filters/network/source/router:router_interface", + "//test/mocks/network:network_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:printers_lib", + ], +) + +envoy_cc_test_library( + name = "utility_lib", + hdrs = ["utility.h"], + deps = [ + "//contrib/sip_proxy/filters/network/source:sip_lib", + "//source/common/buffer:buffer_lib", + "//source/common/common:byte_order_lib", + "//test/common/buffer:utility_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "app_exception_impl_test", + srcs = ["app_exception_impl_test.cc"], + deps = [ + 
":mocks", + "//contrib/sip_proxy/filters/network/source:app_exception_lib", + "//test/test_common:printers_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + deps = [ + ":mocks", + "//contrib/sip_proxy/filters/network/source:config", + "//contrib/sip_proxy/filters/network/source/router:config", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:registry_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "conn_manager_test", + srcs = ["conn_manager_test.cc"], + deps = [ + ":mocks", + ":utility_lib", + "//contrib/sip_proxy/filters/network/source:config", + "//contrib/sip_proxy/filters/network/source:conn_manager_lib", + "//contrib/sip_proxy/filters/network/source:decoder_lib", + "//contrib/sip_proxy/filters/network/source/filters:filter_interface", + "//contrib/sip_proxy/filters/network/source/router:config", + "//contrib/sip_proxy/filters/network/source/router:router_interface", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:printers_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "decoder_test", + srcs = ["decoder_test.cc"], + deps = [ + ":mocks", + ":utility_lib", + "//contrib/sip_proxy/filters/network/source:app_exception_lib", + "//contrib/sip_proxy/filters/network/source:config", + "//contrib/sip_proxy/filters/network/source:conn_manager_lib", + "//contrib/sip_proxy/filters/network/source:decoder_lib", + "//contrib/sip_proxy/filters/network/source/filters:filter_interface", + "//contrib/sip_proxy/filters/network/source/router:config", + "//contrib/sip_proxy/filters/network/source/router:router_interface", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:printers_lib", + 
"//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "router_test", + srcs = ["router_test.cc"], + deps = [ + ":mocks", + ":utility_lib", + "//contrib/sip_proxy/filters/network/source:app_exception_lib", + "//contrib/sip_proxy/filters/network/source:config", + "//contrib/sip_proxy/filters/network/source/router:config", + "//contrib/sip_proxy/filters/network/source/router:router_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/upstream:host_mocks", + "//test/test_common:printers_lib", + "//test/test_common:registry_lib", + ], +) diff --git a/contrib/sip_proxy/filters/network/test/app_exception_impl_test.cc b/contrib/sip_proxy/filters/network/test/app_exception_impl_test.cc new file mode 100644 index 0000000000000..9380caff6f2c4 --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/app_exception_impl_test.cc @@ -0,0 +1,24 @@ +#include "source/common/buffer/buffer_impl.h" + +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "contrib/sip_proxy/filters/network/test/mocks.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +TEST(AppExceptionImplTest, CopyConstructor) { + AppException app_ex(AppExceptionType::InternalError, "msg"); + AppException copy(app_ex); + + EXPECT_EQ(app_ex.type_, copy.type_); + EXPECT_STREQ("msg", copy.what()); +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/config_test.cc b/contrib/sip_proxy/filters/network/test/config_test.cc new file mode 100644 index 0000000000000..940491491108c --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/config_test.cc @@ -0,0 +1,180 @@ +#include + +#include "test/mocks/server/factory_context.h" +#include "test/test_common/registry.h" + +#include 
"contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.validate.h" +#include "contrib/sip_proxy/filters/network/source/config.h" +#include "contrib/sip_proxy/filters/network/source/filters/factory_base.h" +#include "contrib/sip_proxy/filters/network/test/mocks.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +namespace { + +envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy +parseSipProxyFromYaml(const std::string& yaml) { + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy sip_proxy; + TestUtility::loadFromYaml(yaml, sip_proxy); + return sip_proxy; +} +} // namespace + +class SipFilterConfigTestBase { +public: + void testConfig(envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy& config) { + Network::FilterFactoryCb cb; + EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); }); + EXPECT_TRUE(factory_.isTerminalFilterByProto(config, context_)); + + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); + } + + NiceMock context_; + SipProxyFilterConfigFactory factory_; +}; + +class SipFilterConfigTest : public testing::Test, public SipFilterConfigTestBase {}; + +TEST_F(SipFilterConfigTest, ValidateFail) { + EXPECT_THROW(factory_.createFilterFactoryFromProto( + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy(), context_), + ProtoValidationException); +} + +TEST_F(SipFilterConfigTest, ValidProtoConfiguration) { + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy config{}; + config.set_stat_prefix("my_stat_prefix"); + + testConfig(config); +} + +TEST_F(SipFilterConfigTest, SipProxyWithEmptyProto) { + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy config = + *dynamic_cast( + 
factory_.createEmptyConfigProto().get()); + config.set_stat_prefix("my_stat_prefix"); + + testConfig(config); +} + +// Test config with an invalid cluster_header. +TEST_F(SipFilterConfigTest, RouterConfigWithValidCluster) { + const std::string yaml = R"EOF( +stat_prefix: sip +route_config: + name: local_route + routes: + match: + domain: A + route: + cluster: A +sip_filters: + - name: envoy.filters.sip.router +)EOF"; + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy config = + parseSipProxyFromYaml(yaml); + std::string cluster = "A"; + config.mutable_route_config()->mutable_routes()->at(0).mutable_route()->set_cluster(cluster); + EXPECT_NO_THROW({ factory_.createFilterFactoryFromProto(config, context_); }); +} + +// Test config with an explicitly defined router filter. +TEST_F(SipFilterConfigTest, SipProxyWithExplicitRouterConfig) { + const std::string yaml = R"EOF( +stat_prefix: sip +route_config: + name: local_route +sip_filters: + - name: envoy.filters.sip.router +)EOF"; + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy config = + parseSipProxyFromYaml(yaml); + testConfig(config); +} + +// Test config with an unknown filter. +TEST_F(SipFilterConfigTest, SipProxyWithUnknownFilter) { + const std::string yaml = R"EOF( +stat_prefix: sip +route_config: + name: local_route +sip_filters: + - name: no_such_filter + - name: envoy.filters.sip.router +)EOF"; + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy config = + parseSipProxyFromYaml(yaml); + + EXPECT_THROW_WITH_REGEX(factory_.createFilterFactoryFromProto(config, context_), EnvoyException, + "no_such_filter"); +} + +// Test config with multiple filters. 
+TEST_F(SipFilterConfigTest, SipProxyWithMultipleFilters) { + const std::string yaml = R"EOF( +stat_prefix: ingress +route_config: + name: local_route +sip_filters: + - name: envoy.filters.sip.mock_filter + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + key: value + - name: envoy.filters.sip.router +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + + SipFilters::MockFilterConfigFactory factory; + Registry::InjectFactory registry(factory); + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy config = + parseSipProxyFromYaml(yaml); + testConfig(config); + + EXPECT_EQ(1, factory.config_struct_.fields_size()); + EXPECT_EQ("value", factory.config_struct_.fields().at("key").string_value()); + EXPECT_EQ("sip.ingress.", factory.config_stat_prefix_); +} + +// Test SipProtocolOptions +TEST_F(SipFilterConfigTest, SipProtocolOptions) { + const std::string yaml = R"EOF( +session_affinity: true +registration_affinity: true +)EOF"; + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProtocolOptions config; + TestUtility::loadFromYaml(yaml, config); + + NiceMock context; + const auto options = std::make_shared(config); + EXPECT_CALL(*context.cluster_manager_.thread_local_cluster_.cluster_.info_, + extensionProtocolOptions(_)) + .WillRepeatedly(Return(options)); + + EXPECT_EQ(true, options->sessionAffinity()); + EXPECT_EQ(true, options->registrationAffinity()); +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/conn_manager_test.cc b/contrib/sip_proxy/filters/network/test/conn_manager_test.cc new file mode 100644 index 0000000000000..1d8024a431085 --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/conn_manager_test.cc @@ -0,0 +1,718 @@ +#include + +#include "source/common/buffer/buffer_impl.h" + +#include 
"test/common/stats/stat_test_utility.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/printers.h" + +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.validate.h" +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "contrib/sip_proxy/filters/network/source/config.h" +#include "contrib/sip_proxy/filters/network/source/conn_manager.h" +#include "contrib/sip_proxy/filters/network/source/encoder.h" +#include "contrib/sip_proxy/filters/network/test/mocks.h" +#include "contrib/sip_proxy/filters/network/test/utility.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +class TestConfigImpl : public ConfigImpl { +public: + TestConfigImpl(envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy proto_config, + Server::Configuration::MockFactoryContext& context, + SipFilters::DecoderFilterSharedPtr decoder_filter, SipFilterStats& stats) + : ConfigImpl(proto_config, context), decoder_filter_(decoder_filter), stats_(stats) {} + + // ConfigImpl + SipFilterStats& stats() override { return stats_; } + + SipFilters::DecoderFilterSharedPtr custom_filter_; + SipFilters::DecoderFilterSharedPtr decoder_filter_; + SipFilterStats& stats_; +}; + +class SipConnectionManagerTest : public testing::Test { +public: + SipConnectionManagerTest() + : stats_(SipFilterStats::generateStats("test.", store_)), + transaction_infos_(std::make_shared()) {} + ~SipConnectionManagerTest() override { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + } + + void initializeFilter() { initializeFilter(""); } + + void initializeFilter(const std::string& yaml) { + // Destroy any existing filter 
first. + filter_ = nullptr; + + for (const auto& counter : store_.counters()) { + counter->reset(); + } + + if (yaml.empty()) { + proto_config_.set_stat_prefix("test"); + } else { + TestUtility::loadFromYaml(yaml, proto_config_); + TestUtility::validate(proto_config_); + } + + proto_config_.set_stat_prefix("test"); + + decoder_filter_ = std::make_shared>(); + + config_ = std::make_unique(proto_config_, context_, decoder_filter_, stats_); + EXPECT_EQ(config_->settings()->transactionTimeout(), std::chrono::milliseconds(32000)); + if (custom_filter_) { + config_->custom_filter_ = custom_filter_; + } + + ON_CALL(random_, random()).WillByDefault(Return(42)); + filter_ = std::make_unique( + *config_, random_, filter_callbacks_.connection_.dispatcher_.timeSource(), + transaction_infos_); + filter_->initializeReadFilterCallbacks(filter_callbacks_); + filter_->onNewConnection(); + + // NOP currently. + filter_->onAboveWriteBufferHighWatermark(); + filter_->onBelowWriteBufferLowWatermark(); + } + + void + sendLocalReply(Envoy::Extensions::NetworkFilters::SipProxy::DirectResponse::ResponseType type) { + const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + initializeFilter(yaml); + MessageMetadata metadata; + const MockDirectResponse response; + EXPECT_CALL(response, encode(_, _)).WillRepeatedly(Return(type)); + filter_->sendLocalReply(metadata, response, true); + } + + void upstreamDataTest() { + const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + initializeFilter(yaml); + + const std::string 
SIP_INVITE_WRONG_CONTENT_LENGTH = + "INVITE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "From: ;tag=1\x0d\x0a" + "To: \x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: \x0d\x0a" + "Supported: 100rel\x0d\x0a" + "Route: \x0d\x0a" + "P-Asserted-Identity: \x0d\x0a" + "Allow: UPDATE,INVITE,ACK,CANCEL,BYE,PRACK,REFER,MESSAGE,INFO\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Type: application/sdp\x0d\x0a" + "Content-Length: 300\x0d\x0a" + "\x0d\x0a" + "v=0\x0d\x0a" + "o=PCTEL 256 2 IN IP4 11.0.0.10\x0d\x0a" + "c=IN IP4 11.0.0.10\x0d\x0a" + "m=audio 4030 RTP/AVP 0 8\x0d\x0a" + "a=rtpmap:0 PCMU/8000\x0d\x0a" + "a=rtpmap:8 PCMU/8000\x0d\x0a"; + + buffer_.add(SIP_INVITE_WRONG_CONTENT_LENGTH); + + // The "Content-Length" is larger to make reassemble do not call complete() + filter_->decoder_->reassemble(buffer_); + filter_->decoder_->metadata_ = std::make_shared(buffer_.toString()); + filter_->decoder_->decode(); + ConnectionManager::ActiveTransPtr trans = + std::make_unique(*filter_, filter_->decoder_->metadata()); + trans->startUpstreamResponse(); + trans->upstreamData(filter_->decoder_->metadata_); + + // TransportBegin + struct MockResponseDecoderTransportBegin : public ConnectionManager::ResponseDecoder { + MockResponseDecoderTransportBegin(ConnectionManager::ActiveTrans& parent) + : ConnectionManager::ResponseDecoder(parent) {} + FilterStatus transportBegin(MessageMetadataSharedPtr) override { + return FilterStatus::StopIteration; + } + }; + MockResponseDecoderTransportBegin decoder_transportBegin(*trans); + trans->response_decoder_ = + std::make_unique(decoder_transportBegin); + trans->upstreamData(filter_->decoder_->metadata_); + + // MessageBegin + struct MockResponseDecoderMessageBegin : public ConnectionManager::ResponseDecoder { + MockResponseDecoderMessageBegin(ConnectionManager::ActiveTrans& parent) + : 
ConnectionManager::ResponseDecoder(parent) {} + FilterStatus messageBegin(MessageMetadataSharedPtr) override { + return FilterStatus::StopIteration; + } + }; + MockResponseDecoderMessageBegin decoder_messageBegin(*trans); + trans->response_decoder_ = + std::make_unique(decoder_messageBegin); + trans->upstreamData(filter_->decoder_->metadata_); + + // MessageEnd + struct MockResponseDecoderMessageEnd : public ConnectionManager::ResponseDecoder { + MockResponseDecoderMessageEnd(ConnectionManager::ActiveTrans& parent) + : ConnectionManager::ResponseDecoder(parent) {} + FilterStatus messageEnd() override { return FilterStatus::StopIteration; } + }; + MockResponseDecoderMessageEnd decoder_messageEnd(*trans); + trans->response_decoder_ = std::make_unique(decoder_messageEnd); + trans->upstreamData(filter_->decoder_->metadata_); + EXPECT_NE(nullptr, trans->connection()); + + // TransportEnd + struct MockResponseDecoderTransportEnd : public ConnectionManager::ResponseDecoder { + MockResponseDecoderTransportEnd(ConnectionManager::ActiveTrans& parent) + : ConnectionManager::ResponseDecoder(parent) {} + FilterStatus transportEnd() override { return FilterStatus::StopIteration; } + }; + MockResponseDecoderTransportEnd decoder_transportEnd(*trans); + trans->response_decoder_ = + std::make_unique(decoder_transportEnd); + trans->upstreamData(filter_->decoder_->metadata_); + + // AppException + struct MockResponseDecoderAppException : public ConnectionManager::ResponseDecoder { + MockResponseDecoderAppException(ConnectionManager::ActiveTrans& parent) + : ConnectionManager::ResponseDecoder(parent) {} + FilterStatus transportBegin(MessageMetadataSharedPtr) override { + throw AppException(AppExceptionType::ProtocolError, "MockResponseDecoderAppException"); + } + }; + MockResponseDecoderAppException decoder_appException(*trans); + trans->response_decoder_ = + std::make_unique(decoder_appException); + try { + trans->upstreamData(filter_->decoder_->metadata_); + } catch (const 
EnvoyException& ex) { + filter_->stats_.response_exception_.inc(); + EXPECT_EQ(1U, filter_->stats_.response_exception_.value()); + } + + // EnvoyException + struct MockResponseDecoderEnvoyException : public ConnectionManager::ResponseDecoder { + MockResponseDecoderEnvoyException(ConnectionManager::ActiveTrans& parent) + : ConnectionManager::ResponseDecoder(parent) {} + FilterStatus transportBegin(MessageMetadataSharedPtr) override { + throw EnvoyException("MockResponseDecoderEnvoyException"); + } + }; + MockResponseDecoderEnvoyException decoder_envoyException(*trans); + trans->response_decoder_ = + std::make_unique(decoder_envoyException); + try { + trans->upstreamData(filter_->decoder_->metadata_); + } catch (const EnvoyException& ex) { + filter_->stats_.response_exception_.inc(); + EXPECT_EQ(2U, filter_->stats_.response_exception_.value()); + } + + // transportEnd throw envoyException + filter_->read_callbacks_->connection().setDelayedCloseTimeout(std::chrono::milliseconds(1)); + filter_->read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); + ConnectionManager::ActiveTransPtr trans1 = + std::make_unique(*filter_, filter_->decoder_->metadata()); + try { + ConnectionManager::ResponseDecoder response_decoder(*trans1); + response_decoder.newDecoderEventHandler(filter_->decoder_->metadata()); + // transportEnd throw envoyException + response_decoder.onData(filter_->decoder_->metadata()); + } catch (const EnvoyException& ex) { + filter_->stats_.response_exception_.inc(); + EXPECT_EQ(2U, filter_->stats_.response_exception_.value()); + } + + // end_stream = false + ConnectionManager::ActiveTransPtr trans2 = + std::make_unique(*filter_, filter_->decoder_->metadata()); + trans2->sendLocalReply(AppException(AppExceptionType::ProtocolError, "End_stream is false"), + false); + + // route() with metadata=nullptr; + ConnectionManager::ActiveTransPtr trans3 = + std::make_unique(*filter_, filter_->decoder_->metadata()); + trans3->metadata_ = nullptr; + 
EXPECT_EQ(nullptr, trans3->route()); + + trans3->resetDownstreamConnection(); + } + + void resetAllTransTest(bool local_reset) { + // int before = stats_.cx_destroy_local_with_active_rq_; + const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + initializeFilter(yaml); + + const std::string SIP_ACK_FULL = + "ACK sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "From: ;tag=1\x0d\x0a" + "To: \x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 ACK\x0d\x0a" + "Contact: \x0d\x0a" + "Supported: 100rel\x0d\x0a" + "Route: \x0d\x0a" + "P-Asserted-Identity: \x0d\x0a" + "Allow: UPDATE,INVITE,ACK,CANCEL,BYE,PRACK,REFER,MESSAGE,INFO\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Type: application/sdp\x0d\x0a" + "Content-Length: 127\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_ACK_FULL); + + filter_->decoder_->reassemble(buffer_); + filter_->decoder_->metadata_ = std::make_shared(buffer_.toString()); + filter_->decoder_->decode(); + + MessageMetadataSharedPtr metadata = filter_->decoder_->metadata_; + std::string&& k = std::string(metadata->transactionId().value()); + ConnectionManager::ActiveTransPtr new_trans = + std::make_unique(*filter_, metadata); + new_trans->createFilterChain(); + filter_->transactions_.emplace(k, std::move(new_trans)); + filter_->newDecoderEventHandler(metadata); + filter_->resetAllTrans(local_reset); + } + + void resumeResponseTest() { + const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + 
initializeFilter(yaml); + + const std::string SIP_ACK_FULL = + "ACK sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "From: ;tag=1\x0d\x0a" + "To: \x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 ACK\x0d\x0a" + "Contact: \x0d\x0a" + "Supported: 100rel\x0d\x0a" + "Route: \x0d\x0a" + "P-Asserted-Identity: \x0d\x0a" + "Allow: UPDATE,INVITE,ACK,CANCEL,BYE,PRACK,REFER,MESSAGE,INFO\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Type: application/sdp\x0d\x0a" + "Content-Length: 127\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_ACK_FULL); + + filter_->decoder_->reassemble(buffer_); + filter_->decoder_->metadata_ = std::make_shared(buffer_.toString()); + filter_->decoder_->decode(); + + MessageMetadataSharedPtr metadata = filter_->decoder_->metadata_; + ConnectionManager::ActiveTransPtr new_trans = + std::make_unique(*filter_, metadata); + + new_trans->filter_action_ = [&](DecoderEventHandler* filter) -> FilterStatus { + UNREFERENCED_PARAMETER(filter); + new_trans->local_response_sent_ = true; + return FilterStatus::StopIteration; + }; + + std::list decoder_filter_list; + ConnectionManager::ActiveTransDecoderFilterPtr wrapper = + std::make_unique(*new_trans, decoder_filter_); + decoder_filter_->setDecoderFilterCallbacks(*wrapper); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filter_list); + + std::shared_ptr decoder_filter_1 = + std::make_shared>(); + ConnectionManager::ActiveTransDecoderFilterPtr wrapper2 = + std::make_unique(*new_trans, decoder_filter_1); + LinkedList::moveIntoListBack(std::move(wrapper2), decoder_filter_list); + + new_trans->applyDecoderFilters((*(decoder_filter_list.begin())).get()); + + // Other ActiveTransDecoderFilter function cover + ConnectionManager::ActiveTransDecoderFilterPtr decoder = + std::make_unique(*new_trans, decoder_filter_); + EXPECT_EQ(decoder->streamId(), new_trans->streamId()); + EXPECT_EQ(decoder->connection(), 
new_trans->connection()); + decoder->startUpstreamResponse(); + decoder->streamInfo(); + decoder->upstreamData(metadata); + decoder->resetDownstreamConnection(); + filter_->transactions_.emplace(std::string(metadata->transactionId().value()), + std::move(new_trans)); + decoder->onReset(); + } + + NiceMock context_; + std::shared_ptr decoder_filter_; + Stats::TestUtil::TestStore store_; + SipFilterStats stats_; + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy proto_config_; + + std::unique_ptr config_; + + Buffer::OwnedImpl buffer_; + Buffer::OwnedImpl write_buffer_; + NiceMock filter_callbacks_; + NiceMock random_; + std::unique_ptr filter_; + std::shared_ptr transaction_infos_; + SipFilters::DecoderFilterSharedPtr custom_filter_; + MessageMetadataSharedPtr metadata_; +}; + +TEST_F(SipConnectionManagerTest, OnDataHandlesSipCall) { + const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + initializeFilter(yaml); + + const std::string SIP_INVITE_FULL = + "INVITE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "From: ;tag=1\x0d\x0a" + "To: \x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: \x0d\x0a" + "Supported: 100rel\x0d\x0a" + "Route: \x0d\x0a" + "P-Asserted-Identity: \x0d\x0a" + "Allow: UPDATE,INVITE,ACK,CANCEL,BYE,PRACK,REFER,MESSAGE,INFO\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Type: application/sdp\x0d\x0a" + "Content-Length: 127\x0d\x0a" + "\x0d\x0a" + "v=0\x0d\x0a" + "o=PCTEL 256 2 IN IP4 11.0.0.10\x0d\x0a" + "c=IN IP4 11.0.0.10\x0d\x0a" + "m=audio 4030 RTP/AVP 0 8\x0d\x0a" + "a=rtpmap:0 PCMU/8000\x0d\x0a" + "a=rtpmap:8 PCMU/8000\x0d\x0a"; + + buffer_.add(SIP_INVITE_FULL); + + 
EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipConnectionManagerTest, OnDataHandlesSipCallEndStream) { + const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + initializeFilter(yaml); + + const std::string SIP_INVITE_FULL = + "INVITE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "From: ;tag=1\x0d\x0a" + "To: \x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: \x0d\x0a" + "Supported: 100rel\x0d\x0a" + "Route: \x0d\x0a" + "P-Asserted-Identity: \x0d\x0a" + "Allow: UPDATE,INVITE,ACK,CANCEL,BYE,PRACK,REFER,MESSAGE,INFO\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Type: application/sdp\x0d\x0a" + "Content-Length: 127\x0d\x0a" + "\x0d\x0a" + "v=0\x0d\x0a" + "o=PCTEL 256 2 IN IP4 11.0.0.10\x0d\x0a" + "c=IN IP4 11.0.0.10\x0d\x0a" + "m=audio 4030 RTP/AVP 0 8\x0d\x0a" + "a=rtpmap:0 PCMU/8000\x0d\x0a" + "a=rtpmap:8 PCMU/8000\x0d\x0a"; + + buffer_.add(SIP_INVITE_FULL); + + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipConnectionManagerTest, SendLocalReply_SuccessReply) { + sendLocalReply( + Envoy::Extensions::NetworkFilters::SipProxy::DirectResponse::ResponseType::SuccessReply); +} + +TEST_F(SipConnectionManagerTest, SendLocalReply_ErrorReply) { + sendLocalReply( + 
Envoy::Extensions::NetworkFilters::SipProxy::DirectResponse::ResponseType::ErrorReply); +} + +TEST_F(SipConnectionManagerTest, SendLocalReply_Exception) { + sendLocalReply( + Envoy::Extensions::NetworkFilters::SipProxy::DirectResponse::ResponseType::Exception); +} + +TEST_F(SipConnectionManagerTest, UpstreamData) { upstreamDataTest(); } + +TEST_F(SipConnectionManagerTest, ResetLocalTrans) { + resetAllTransTest(true); + EXPECT_EQ(1U, store_.counter("test.cx_destroy_local_with_active_rq").value()); +} + +TEST_F(SipConnectionManagerTest, ResetRemoteTrans) { + resetAllTransTest(false); + EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); +} +TEST_F(SipConnectionManagerTest, ResumeResponse) { resumeResponseTest(); } + +TEST_F(SipConnectionManagerTest, EncodeInsertEPMatchedxSuri) { + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: " + "\x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_OK200_FULL); + + absl::string_view header = ""; + metadata_ = std::make_shared(buffer_.toString()); + metadata_->addEPOperation(SIP_OK200_FULL.find("Contact: ") + strlen("Contact: "), header, + "pcsf-cfed.cncs.svc.cluster.local", "x-suri"); + Buffer::OwnedImpl response_buffer; + metadata_->setEP("127.0.0.1"); + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, response_buffer); + EXPECT_EQ(response_buffer.length(), buffer_.length() + strlen(";ep=127.0.0.1")); +} + +TEST_F(SipConnectionManagerTest, EncodeInsertEPMatchedHost) { + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: " + "\x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + 
"Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_OK200_FULL); + + absl::string_view header = ""; + metadata_ = std::make_shared(buffer_.toString()); + metadata_->addEPOperation(SIP_OK200_FULL.find("Contact: ") + strlen("Contact: "), header, + "11.0.0.10", "host"); + Buffer::OwnedImpl response_buffer; + metadata_->setEP("127.0.0.1"); + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, response_buffer); + EXPECT_EQ(response_buffer.length(), buffer_.length() + strlen(";ep=127.0.0.1")); +} + +TEST_F(SipConnectionManagerTest, EncodeInsertEPNoMatchedxSuri) { + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: \x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_OK200_FULL); + + absl::string_view header = ""; + metadata_ = std::make_shared(buffer_.toString()); + metadata_->addEPOperation(SIP_OK200_FULL.find("Contact: ") + strlen("Contact: "), header, + "11.0.0.10", "x-suri"); + Buffer::OwnedImpl response_buffer; + metadata_->setEP("127.0.0.1"); + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, response_buffer); + EXPECT_EQ(response_buffer.length(), buffer_.length() + strlen(";ep=127.0.0.1")); +} + +TEST_F(SipConnectionManagerTest, EncodeInsertOpaque) { + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: " + "\x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_OK200_FULL); + + absl::string_view header = + "Contact: "; + metadata_ = std::make_shared(buffer_.toString()); + metadata_->addOpaqueOperation(SIP_OK200_FULL.find("Contact: "), header); + 
Buffer::OwnedImpl response_buffer; + metadata_->setEP("127.0.0.1"); + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, response_buffer); + EXPECT_EQ(response_buffer.length(), buffer_.length() + strlen(",opaque=\"127.0.0.1\"")); +} + +TEST_F(SipConnectionManagerTest, EncodeDelete) { + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: " + "\x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_OK200_FULL); + + metadata_ = std::make_shared(buffer_.toString()); + metadata_->setOperation(Operation(OperationType::Delete, SIP_OK200_FULL.find(";transport="), + DeleteOperationValue(strlen(";transport=TCP")))); + Buffer::OwnedImpl response_buffer; + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, response_buffer); + EXPECT_EQ(response_buffer.length(), buffer_.length() - strlen(";transport=TCP")); +} + +TEST_F(SipConnectionManagerTest, EncodeModify) { + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: " + "\x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_OK200_FULL); + + metadata_ = std::make_shared(buffer_.toString()); + metadata_->setOperation(Operation(OperationType::Modify, + SIP_OK200_FULL.find(";transport=") + strlen(";transport="), + ModifyOperationValue(strlen("TCP"), "SCTP"))); + Buffer::OwnedImpl response_buffer; + + std::shared_ptr encoder = std::make_shared(); + encoder->encode(metadata_, response_buffer); + EXPECT_EQ(response_buffer.length(), buffer_.length() - strlen("TCP") + strlen("SCTP")); +} + +} // namespace SipProxy +} // namespace NetworkFilters +} // 
namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/decoder_test.cc b/contrib/sip_proxy/filters/network/test/decoder_test.cc new file mode 100644 index 0000000000000..4a25ae8e51501 --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/decoder_test.cc @@ -0,0 +1,565 @@ +#include "source/common/buffer/buffer_impl.h" + +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "absl/strings/string_view.h" +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "contrib/sip_proxy/filters/network/source/config.h" +#include "contrib/sip_proxy/filters/network/source/conn_manager.h" +#include "contrib/sip_proxy/filters/network/source/decoder.h" +#include "contrib/sip_proxy/filters/network/test/mocks.h" +#include "contrib/sip_proxy/filters/network/test/utility.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +class TestConfigImpl : public ConfigImpl { +public: + TestConfigImpl(envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy proto_config, + Server::Configuration::MockFactoryContext& context, + SipFilters::DecoderFilterSharedPtr decoder_filter, SipFilterStats& stats) + : ConfigImpl(proto_config, context), decoder_filter_(decoder_filter), stats_(stats) {} + + // ConfigImpl + SipFilterStats& stats() override { return stats_; } + void createFilterChain(SipFilters::FilterChainFactoryCallbacks& callbacks) override { + if (custom_filter_) { + callbacks.addDecoderFilter(custom_filter_); + } + callbacks.addDecoderFilter(decoder_filter_); + } + + SipFilters::DecoderFilterSharedPtr custom_filter_; + SipFilters::DecoderFilterSharedPtr decoder_filter_; + SipFilterStats& stats_; +}; + +class SipDecoderTest : public 
testing::Test { +public: + SipDecoderTest() + : stats_(SipFilterStats::generateStats("test.", store_)), + transaction_infos_(std::make_shared()) {} + ~SipDecoderTest() override { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + } + + void initializeFilter() { initializeFilter(""); } + + void initializeFilter(const std::string& yaml) { + // Destroy any existing filter first. + filter_ = nullptr; + + for (const auto& counter : store_.counters()) { + counter->reset(); + } + + if (yaml.empty()) { + proto_config_.set_stat_prefix("test"); + } else { + TestUtility::loadFromYaml(yaml, proto_config_); + TestUtility::validate(proto_config_); + } + + proto_config_.set_stat_prefix("test"); + + decoder_filter_ = std::make_shared>(); + + config_ = std::make_unique(proto_config_, context_, decoder_filter_, stats_); + if (custom_filter_) { + config_->custom_filter_ = custom_filter_; + } + + ON_CALL(random_, random()).WillByDefault(Return(42)); + filter_ = std::make_unique( + *config_, random_, filter_callbacks_.connection_.dispatcher_.timeSource(), + transaction_infos_); + filter_->initializeReadFilterCallbacks(filter_callbacks_); + filter_->onNewConnection(); + + // NOP currently. 
+ filter_->onAboveWriteBufferHighWatermark(); + filter_->onBelowWriteBufferLowWatermark(); + } + + void headerHandlerTest() { + MockDecoderCallbacks callback; + Decoder decoder(callback); + decoder.setCurrentHeader(HeaderType::Via); + Decoder::REGISTERHandler msgHandler(decoder); + Decoder::HeaderHandler headerHandler(msgHandler); + EXPECT_EQ(HeaderType::Via, headerHandler.currentHeader()); + absl::string_view str(""); + headerHandler.processEvent(str); + headerHandler.processCseq(str); + + DecoderStateMachine::DecoderStatus status(State::MessageBegin); + } + + NiceMock context_; + std::shared_ptr decoder_filter_; + Stats::TestUtil::TestStore store_; + SipFilterStats stats_; + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProxy proto_config_; + + std::unique_ptr config_; + + Buffer::OwnedImpl buffer_; + Buffer::OwnedImpl write_buffer_; + NiceMock filter_callbacks_; + NiceMock random_; + std::unique_ptr filter_; + std::shared_ptr transaction_infos_; + SipFilters::DecoderFilterSharedPtr custom_filter_; +}; + +const std::string yaml = R"EOF( +stat_prefix: egress +route_config: + name: local_route + routes: + - match: + domain: "test" + route: + cluster: "test" +settings: + transaction_timeout: 32s + own_domain: pcsf-cfed.cncs.svc.cluster.local + domain_match_parameter_name: x-suri +)EOF"; + +TEST_F(SipDecoderTest, DecodeINVITE) { + initializeFilter(yaml); + + const std::string SIP_INVITE_FULL = + "INVITE sip:User.0000@tas01.defult.svc.cluster.local;ep=127.0.0.1 SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "Route: \x0d\x0a" + "Record-Route: \x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: \x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_INVITE_FULL); + EXPECT_EQ(filter_->onData(buffer_, false), 
Network::FilterStatus::StopIteration); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeRegister) { + initializeFilter(yaml); + + const std::string SIP_REGISTER_FULL = + "REGISTER sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "CSeq: 1 REGISTER\x0d\x0a" + "Contact: \x0d\x0a" + "Expires: 7200\x0d\x0a" + "Supported: 100rel,timer\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "Require: Path\x0d\x0a" + "Path: " + "\x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Authorization: Digest username=\"tc05sub1@cncs.nokialab.com\", realm=\"cncs.nokialab.com\", " + "nonce=\"436dbd0f60a52adc2DPadc43f91c774b51ac4cad614258c43cf9df\", algorithm=MD5, " + "uri=\"sip:10.30.29.47\", response=\"c4f3c2fccdca9c5febc66d4226b5afae\", nc=01201201, " + "cnonce=\"123456\", qop=auth\x0d\x0a" + "Authorization: Digest username=\"tc05sub1@cncs.nokialab.com\", realm=\"cncs.nokialab.com\", " + "nonce=\"436dbd0f60a52adc2DPadc43f91c774b51ac4cad614258c43cf9df\", algorithm=MD5, " + "uri=\"sip:10.30.29.47\", response=\"c4f3c2fccdca9c5febc66d4226b5afae\", nc=01201201, " + "cnonce=\"123456\", qop=auth, opaque=\"127.0.0.1\"\x0d\x0a" + "Authorization: Digest username=\"tc05sub1@cncs.nokialab.com\", realm=\"cncs.nokialab.com\", " + "nonce=\"436dbd0f60a52adc2DPadc43f91c774b51ac4cad614258c43cf9df\", algorithm=MD5, " + "uri=\"sip:10.30.29.47\", response=\"c4f3c2fccdca9c5febc66d4226b5afae\", nc=01201201, " + "cnonce=\"123456\", qop=auth, opaque=\"127.0.0.1\x0d\x0a" + "\x0d\x0a"; + + buffer_.add(SIP_REGISTER_FULL); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, 
stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeOK200) { + initializeFilter(yaml); + + const std::string SIP_OK200_FULL = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 INVITE\x0d\x0a" + "Contact: \x0d\x0a" + "Record-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Service-Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Path: " + "\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_OK200_FULL); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + const std::string SIP_OK200_REGISTER = + "SIP/2.0 200 OK\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "CSeq: 1 Register\x0d\x0a" + "Contact: \x0d\x0a" + "Record-Route: \x0d\x0a" + "Service-Route: \x0d\x0a" + "Route: \x0d\x0a" + "Route: \x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_OK200_REGISTER); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(2U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} +TEST_F(SipDecoderTest, DecodeGeneral) { + initializeFilter(yaml); + + const std::string SIP_CANCEL_FULL = + "CANCEL sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "Route: \x0d\x0a" + "Record-Route: \x0d\x0a" + "CSeq: 1 CANCEL\x0d\x0a" + "Contact: \x0d\x0a" + "Path: " + "\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + + 
buffer_.add(SIP_CANCEL_FULL); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeSUBSCRIBE) { + initializeFilter(yaml); + + const std::string SIP_SUBSCRIBE_FULL = + "SUBSCRIBE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "Route: \x0d\x0a" + "Record-Route: \x0d\x0a" + "CSeq: 2 SUBSCRIBE\x0d\x0a" + "Contact: \x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "Event: feature-status-exchange\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_SUBSCRIBE_FULL); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + const std::string SIP_SUBSCRIBE_REG = + "SUBSCRIBE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "Route: \x0d\x0a" + "Record-Route: \x0d\x0a" + "CSeq: 2 SUBSCRIBE\x0d\x0a" + "Contact: \x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "Event: reg\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_SUBSCRIBE_REG); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(2U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeFAILURE4XX) { + initializeFilter(yaml); + + const std::string SIP_FAILURE4XX_FULL = + 
"SIP/2.0 401 Unauthorized\x0d\x0a" + "Contact: \x0d\x0a" + "Via: SIP/2.0/TCP " + "192.169.110.15:5060;branch=z9hG4bK1f6eb66cd87d2ae67c4b8a69d67c4f7e60a522a8-0-b-" + "60a52adb349a8674\x0d\x0a" + "Via: SIP/2.0/UDP 127.0.0.1;branch=z9hG4bK_0002_34-139705093266412;lsstag=pt-1.12\x0d\x0a" + "Via: SIP/2.0/UDP 10.30.29.58:38612;received=10.30.29.58;branch=z9hG4bK1434\x0d\x0a" + "From: ;tag=587215\x0d\x0a" + "To: ;tag=182294901\x0d\x0a" + "Call-ID: tc05sub1-1@10.30.29.58-38612\x0d\x0a" + "CSeq: 6 REGISTER\x0d\x0a" + "P-Charging-Vector: icid-value=\"PCSF:1-cfed-0-1-0000000060a52adb-000000000000000b\"\x0d\x0a" + "WWW-Authenticate: Digest " + "realm=\"cncs.nokialab.com\",nonce=" + "\"436dbd0f60a52adc2DPadc43f91c774b51ac4cad614258c43cf9df\",algorithm=MD5,qop=" + "\"auth\"\x0d\x0a" + "WWW-Authenticate: Digest " + "realm=\"cncs.nokialab.com\",nonce=" + "\"436dbd0f60a52adc2DPadc43f91c774b51ac4cad614258c43cf9df\",algorithm=MD5,qop=\"auth\"," + "opaque=\"127.0.0.1\"\x0d\x0a" + "WWW-Authenticate: Digest " + "realm=\"cncs.nokialab.com\",nonce=" + "\"436dbd0f60a52adc2DPadc43f91c774b51ac4cad614258c43cf9df\",algorithm=MD5,qop=\"auth\"," + "opaque=\"127.0.0.1\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_FAILURE4XX_FULL); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeEMPTY) { + initializeFilter(yaml); + + const std::string SIP_EMPTY = "\x0d\x0a"; + buffer_.add(SIP_EMPTY); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + const std::string SIP_WRONG_METHOD_TYPE = + "WRONGMETHOD sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + 
"Route: \x0d\x0a" + "CSeq: 2 SUBSCRIBE\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_WRONG_METHOD_TYPE); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + const std::string SIP_NO_CONTENT_LENGTH = + "ACK sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 2 ACK\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_NO_CONTENT_LENGTH); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + const std::string SIP_CONTENT_LENGTH_ZERO = + "ACK sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 2 ACK\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: -1\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_NO_CONTENT_LENGTH); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeAck) { + initializeFilter(yaml); + + const std::string SIP_ACK_FULL = + "ACK sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 2 ACK\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_ACK_FULL); + + 
EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeBYE) { + initializeFilter(yaml); + + const std::string SIP_BYE_FULL = + "BYE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 2 BYE\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_BYE_FULL); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeUPDATE) { + initializeFilter(yaml); + + const std::string SIP_UPDATE_FULL = + "UPDATE sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 2 UPDATE\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_UPDATE_FULL); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeREFER) { + initializeFilter(yaml); + + const std::string SIP_REFER_FULL = + "REFER sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 
11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 2 REFER\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_REFER_FULL); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, DecodeNOTIFY) { + initializeFilter(yaml); + + const std::string SIP_NOTIFY_FULL = + "NOTIFY sip:User.0000@tas01.defult.svc.cluster.local SIP/2.0\x0d\x0a" + "Call-ID: 1-3193@11.0.0.10\x0d\x0a" + "Via: SIP/2.0/TCP 11.0.0.10:15060;branch=z9hG4bK-3193-1-0\x0d\x0a" + "To: \x0d\x0a" + "From: ;tag=1\x0d\x0a" + "Route: \x0d\x0a" + "CSeq: 1 NOTIFY\x0d\x0a" + "Contact: ;tag=1\x0d\x0a" + "Record-Route: \x0d\x0a" + "Service-Route: \x0d\x0a" + "Path: " + "\x0d\x0a" + "Max-Forwards: 70\x0d\x0a" + "Content-Length: 0\x0d\x0a" + "\x0d\x0a"; + buffer_.add(SIP_NOTIFY_FULL); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, stats_.request_active_.value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(SipDecoderTest, HandleState) { + MessageMetadataSharedPtr metadata; + MockDecoderEventHandler handler; + DecoderStateMachine machine(metadata, handler); + /* TODO panic: not reached + machine.setCurrentState(State::WaitForData); + */ + machine.setCurrentState(State::MessageEnd); + EXPECT_CALL(handler, messageEnd()).WillOnce(Return(FilterStatus::StopIteration)); + machine.run(); + EXPECT_EQ(State::TransportEnd, machine.currentState()); +} + +TEST_F(SipDecoderTest, HeaderTest) { + StateNameValues stateNameValues_; + EXPECT_EQ("Done", stateNameValues_.name(State::Done)); +} + +TEST_F(SipDecoderTest, HeaderHandlerTest) { 
headerHandlerTest(); } + +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/mocks.cc b/contrib/sip_proxy/filters/network/test/mocks.cc new file mode 100644 index 0000000000000..2fa4e2ae1bc6f --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/mocks.cc @@ -0,0 +1,97 @@ +#include "contrib/sip_proxy/filters/network/test/mocks.h" + +#include + +#include "source/common/protobuf/protobuf.h" + +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { + +// Provide a specialization for ProtobufWkt::Struct (for MockFilterConfigFactory) +template <> +void MessageUtil::validate(const ProtobufWkt::Struct&, ProtobufMessage::ValidationVisitor&) {} + +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +MockConfig::MockConfig() = default; +MockConfig::~MockConfig() = default; + +MockDecoderCallbacks::MockDecoderCallbacks() { + ON_CALL(*this, getLocalIp()).WillByDefault(Return("127.0.0.1")); + ON_CALL(*this, getOwnDomain()).WillByDefault(Return("pcsf-cfed.cncs.svc.cluster.local")); + ON_CALL(*this, getDomainMatchParamName()).WillByDefault(Return("x-suri")); +} +MockDecoderCallbacks::~MockDecoderCallbacks() = default; + +MockDecoderEventHandler::MockDecoderEventHandler() { + ON_CALL(*this, transportBegin(_)).WillByDefault(Return(FilterStatus::Continue)); + ON_CALL(*this, transportEnd()).WillByDefault(Return(FilterStatus::Continue)); + ON_CALL(*this, messageBegin(_)).WillByDefault(Return(FilterStatus::Continue)); + ON_CALL(*this, messageEnd()).WillByDefault(Return(FilterStatus::Continue)); +} +MockDecoderEventHandler::~MockDecoderEventHandler() = default; + +MockDirectResponse::MockDirectResponse() = default; +MockDirectResponse::~MockDirectResponse() = default; + +namespace SipFilters { + +MockDecoderFilter::MockDecoderFilter() { + ON_CALL(*this, 
transportBegin(_)).WillByDefault(Return(FilterStatus::Continue)); + ON_CALL(*this, transportEnd()).WillByDefault(Return(FilterStatus::Continue)); + ON_CALL(*this, messageBegin(_)).WillByDefault(Return(FilterStatus::Continue)); + ON_CALL(*this, messageEnd()).WillByDefault(Return(FilterStatus::Continue)); +} +MockDecoderFilter::~MockDecoderFilter() = default; + +MockDecoderFilterCallbacks::MockDecoderFilterCallbacks() { + + ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_)); + ON_CALL(*this, transactionInfos()).WillByDefault(Return(transaction_infos_)); + ON_CALL(*this, streamInfo()).WillByDefault(ReturnRef(stream_info_)); +} +MockDecoderFilterCallbacks::~MockDecoderFilterCallbacks() = default; + +MockFilterConfigFactory::MockFilterConfigFactory() : name_("envoy.filters.sip.mock_filter") { + mock_filter_ = std::make_shared>(); +} + +MockFilterConfigFactory::~MockFilterConfigFactory() = default; + +FilterFactoryCb MockFilterConfigFactory::createFilterFactoryFromProto( + const Protobuf::Message& proto_config, const std::string& stats_prefix, + Server::Configuration::FactoryContext& context) { + UNREFERENCED_PARAMETER(context); + + config_struct_ = dynamic_cast(proto_config); + config_stat_prefix_ = stats_prefix; + + return [this](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addDecoderFilter(mock_filter_); + }; +} + +} // namespace SipFilters +// +namespace Router { + +MockRouteEntry::MockRouteEntry() { + ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_)); +} +MockRouteEntry::~MockRouteEntry() = default; + +MockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); } +MockRoute::~MockRoute() = default; + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/mocks.h b/contrib/sip_proxy/filters/network/test/mocks.h new file mode 100644 index 0000000000000..456d973e78ee8 
--- /dev/null +++ b/contrib/sip_proxy/filters/network/test/mocks.h @@ -0,0 +1,180 @@ +#pragma once + +#include "envoy/router/router.h" + +#include "test/mocks/network/mocks.h" +#include "test/mocks/stream_info/mocks.h" +#include "test/test_common/printers.h" + +#include "contrib/sip_proxy/filters/network/source/conn_manager.h" +#include "contrib/sip_proxy/filters/network/source/conn_state.h" +#include "contrib/sip_proxy/filters/network/source/filters/factory_base.h" +#include "contrib/sip_proxy/filters/network/source/filters/filter.h" +#include "contrib/sip_proxy/filters/network/source/metadata.h" +#include "contrib/sip_proxy/filters/network/source/protocol.h" +#include "contrib/sip_proxy/filters/network/source/router/router.h" +#include "gmock/gmock.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { + +class MockConfig : public Config { +public: + MockConfig(); + ~MockConfig() override; + + // SipProxy::Config + MOCK_METHOD(SipFilters::FilterChainFactory&, filterFactory, ()); + MOCK_METHOD(SipFilterStats&, stats, ()); + MOCK_METHOD(DecoderPtr, createDecoder, (DecoderCallbacks&)); + MOCK_METHOD(Router::Config&, routerConfig, ()); +}; + +class MockDecoderEventHandler : public DecoderEventHandler { +public: + MockDecoderEventHandler(); + ~MockDecoderEventHandler() override; + + // SipProxy::DecoderEventHandler + MOCK_METHOD(FilterStatus, transportBegin, (MessageMetadataSharedPtr metadata)); + MOCK_METHOD(FilterStatus, transportEnd, ()); + MOCK_METHOD(FilterStatus, messageBegin, (MessageMetadataSharedPtr metadata)); + MOCK_METHOD(FilterStatus, messageEnd, ()); +}; + +class MockDecoderCallbacks : public DecoderCallbacks { +public: + MockDecoderCallbacks(); + ~MockDecoderCallbacks() override; + + // SipProxy::DecoderCallbacks + MOCK_METHOD(DecoderEventHandler&, newDecoderEventHandler, (MessageMetadataSharedPtr)); + MOCK_METHOD(absl::string_view, getLocalIp, ()); + MOCK_METHOD(std::string, 
getOwnDomain, ()); + MOCK_METHOD(std::string, getDomainMatchParamName, ()); +}; + +class MockDirectResponse : public DirectResponse { +public: + MockDirectResponse(); + ~MockDirectResponse() override; + + // SipProxy::DirectResponse + MOCK_METHOD(DirectResponse::ResponseType, encode, + (MessageMetadata & metadata, Buffer::Instance& buffer), (const)); +}; + +namespace Router { +class MockRoute; +} // namespace Router + +namespace SipFilters { + +class MockDecoderFilter : public DecoderFilter { +public: + MockDecoderFilter(); + ~MockDecoderFilter() override; + + // SipProxy::SipFilters::DecoderFilter + MOCK_METHOD(void, onDestroy, ()); + MOCK_METHOD(void, setDecoderFilterCallbacks, (DecoderFilterCallbacks & callbacks)); + MOCK_METHOD(void, resetUpstreamConnection, ()); + MOCK_METHOD(bool, passthroughSupported, (), (const)); + + // SipProxy::DecoderEventHandler + MOCK_METHOD(FilterStatus, passthroughData, (Buffer::Instance & data)); + MOCK_METHOD(FilterStatus, transportBegin, (MessageMetadataSharedPtr metadata)); + MOCK_METHOD(FilterStatus, transportEnd, ()); + MOCK_METHOD(FilterStatus, messageBegin, (MessageMetadataSharedPtr metadata)); + MOCK_METHOD(FilterStatus, messageEnd, ()); +}; + +class MockDecoderFilterCallbacks : public DecoderFilterCallbacks { +public: + MockDecoderFilterCallbacks(); + ~MockDecoderFilterCallbacks() override; + + // SipProxy::SipFilters::DecoderFilterCallbacks + MOCK_METHOD(uint64_t, streamId, (), (const)); + MOCK_METHOD(std::string, transactionId, (), (const)); + MOCK_METHOD(const Network::Connection*, connection, (), (const)); + MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(void, sendLocalReply, (const DirectResponse&, bool)); + MOCK_METHOD(void, startUpstreamResponse, ()); + MOCK_METHOD(ResponseStatus, upstreamData, (MessageMetadataSharedPtr)); + MOCK_METHOD(void, resetDownstreamConnection, ()); + MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ()); + 
MOCK_METHOD(std::shared_ptr, transactionInfos, ()); + MOCK_METHOD(std::shared_ptr, settings, ()); + MOCK_METHOD(void, onReset, ()); + MOCK_METHOD(MessageMetadataSharedPtr, responseMetadata, ()); + MOCK_METHOD(bool, responseSuccess, ()); + + uint64_t stream_id_{1}; + std::string transaction_id_{"test"}; + NiceMock connection_; + NiceMock stream_info_; + std::shared_ptr route_; + std::shared_ptr transaction_infos_; +}; + +class MockFilterConfigFactory : public NamedSipFilterConfigFactory { +public: + MockFilterConfigFactory(); + ~MockFilterConfigFactory() override; + + FilterFactoryCb + createFilterFactoryFromProto(const Protobuf::Message& proto_config, + const std::string& stats_prefix, + Server::Configuration::FactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return name_; } + + ProtobufWkt::Struct config_struct_; + std::string config_stat_prefix_; + +private: + std::shared_ptr mock_filter_; + const std::string name_; +}; + +} // namespace SipFilters + +namespace Router { + +class MockRouteEntry : public RouteEntry { +public: + MockRouteEntry(); + ~MockRouteEntry() override; + + // // SipProxy::Router::RouteEntry + MOCK_METHOD(const std::string&, clusterName, (), (const)); + MOCK_METHOD(const Envoy::Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const)); + std::string cluster_name_{"fake_cluster"}; +}; + +class MockRoute : public Route { +public: + MockRoute(); + ~MockRoute() override; + + // SipProxy::Router::Route + MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); + + NiceMock route_entry_; +}; + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/router_test.cc b/contrib/sip_proxy/filters/network/test/router_test.cc new file mode 100644 index 0000000000000..41064a1abbbc2 --- /dev/null +++ 
b/contrib/sip_proxy/filters/network/test/router_test.cc @@ -0,0 +1,682 @@ +#include +#include + +#include "envoy/tcp/conn_pool.h" + +#include "source/common/buffer/buffer_impl.h" + +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/upstream/host.h" +#include "test/test_common/printers.h" +#include "test/test_common/registry.h" + +#include "contrib/sip_proxy/filters/network/source/app_exception_impl.h" +#include "contrib/sip_proxy/filters/network/source/config.h" +#include "contrib/sip_proxy/filters/network/source/router/config.h" +#include "contrib/sip_proxy/filters/network/source/router/router_impl.h" +#include "contrib/sip_proxy/filters/network/source/sip.h" +#include "contrib/sip_proxy/filters/network/test/mocks.h" +#include "contrib/sip_proxy/filters/network/test/utility.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::ContainsRegex; +using testing::Eq; +using testing::Invoke; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace SipProxy { +namespace Router { + +class SipRouterTest : public testing::Test { +public: + SipRouterTest() = default; + void initializeTrans(bool has_option = true) { + if (has_option == true) { + const std::string yaml = R"EOF( +session_affinity: true +registration_affinity: true +)EOF"; + + envoy::extensions::filters::network::sip_proxy::v3alpha::SipProtocolOptions config; + TestUtility::loadFromYaml(yaml, config); + + const auto options = std::make_shared(config); + EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, + extensionProtocolOptions(_)) + .WillRepeatedly(Return(options)); + } + + transaction_infos_ = std::make_shared(); + context_.cluster_manager_.initializeThreadLocalClusters({"cluster"}); + } + + void initializeRouter() { + route_ = new NiceMock(); + route_ptr_.reset(route_); + + router_ = 
std::make_unique(context_.clusterManager(), "test", context_.scope()); + + EXPECT_EQ(nullptr, router_->downstreamConnection()); + + EXPECT_CALL(callbacks_, transactionInfos()).WillOnce(Return(transaction_infos_)); + router_->setDecoderFilterCallbacks(callbacks_); + } + + void initializeRouterWithCallback() { + route_ = new NiceMock(); + route_ptr_.reset(route_); + + router_ = std::make_unique(context_.clusterManager(), "test", context_.scope()); + + EXPECT_CALL(callbacks_, transactionInfos()).WillOnce(Return(transaction_infos_)); + router_->setDecoderFilterCallbacks(callbacks_); + + EXPECT_EQ(nullptr, router_->downstreamConnection()); + } + + void initializeMetadata(MsgType msg_type, MethodType method = MethodType::Invite, + bool set_destination = true) { + + metadata_ = std::make_shared(); + metadata_->setMethodType(method); + metadata_->setMsgType(msg_type); + metadata_->setTransactionId(""); + metadata_->setRouteEP("10.0.0.1"); + metadata_->setRouteOpaque("10.0.0.1"); + metadata_->setDomain( + "", + "host"); + if (set_destination) { + metadata_->setDestination("10.0.0.1"); + } + } + + void initializeTransaction() { + auto transaction_info_ptr = std::make_shared( + "test", thread_local_, static_cast(2), "", "x-suri"); + transaction_info_ptr->init(); + transaction_infos_->emplace(cluster_name_, transaction_info_ptr); + } + + void startRequest(MsgType msg_type, MethodType method = MethodType::Invite) { + // const bool strip_service_name = false) + initializeMetadata(msg_type, method); + EXPECT_EQ(FilterStatus::Continue, router_->transportBegin(metadata_)); + + EXPECT_CALL(callbacks_, route()).WillRepeatedly(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillRepeatedly(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); + + EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_)); + + EXPECT_CALL(callbacks_, 
dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); + EXPECT_EQ(&connection_, router_->downstreamConnection()); + + EXPECT_EQ(nullptr, router_->metadataMatchCriteria()); + } + + void connectUpstream() { + EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.connection_data_, + addUpstreamCallbacks(_)) + .WillOnce(Invoke([&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void { + upstream_callbacks_ = &cb; + })); + + conn_state_.reset(); + EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.connection_data_, + connectionState()) + .WillRepeatedly( + Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); })); + + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolReady(upstream_connection_); + + EXPECT_NE(nullptr, upstream_callbacks_); + } + + void startRequestWithExistingConnection(MsgType msg_type, + MethodType method = MethodType::Invite) { + initializeMetadata(msg_type, method); + EXPECT_EQ(FilterStatus::Continue, router_->transportBegin({})); + + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); + EXPECT_NE(nullptr, upstream_callbacks_); + } + + void completeRequest() { + EXPECT_EQ(FilterStatus::Continue, router_->messageEnd()); + EXPECT_EQ(FilterStatus::Continue, router_->transportEnd()); + } + + void returnResponse(MsgType msg_type = MsgType::Response, bool is_success = true) { + Buffer::OwnedImpl buffer; + + initializeMetadata(msg_type, MethodType::Ok200, false); + + ON_CALL(callbacks_, responseSuccess()).WillByDefault(Return(is_success)); + + upstream_callbacks_->onUpstreamData(buffer, false); + } + + void destroyRouter() { + router_->onDestroy(); + router_.reset(); + } + void destroyRouterOutofRange() { + // std::out_of_range Exception + EXPECT_CALL(callbacks_, transactionId()) + .Times(2) + .WillOnce(Return("test")) + .WillOnce(Return("test1")); + + 
router_->onDestroy(); + router_.reset(); + } + + NiceMock context_; + NiceMock connection_; + NiceMock streamInfo_; + NiceMock dispatcher_; + NiceMock time_source_; + NiceMock callbacks_; + NiceMock* route_{}; + NiceMock route_entry_; + NiceMock* host_{}; + Tcp::ConnectionPool::ConnectionStatePtr conn_state_; + Buffer::OwnedImpl buffer_; + NiceMock thread_local_; + + std::shared_ptr transaction_infos_; + + RouteConstSharedPtr route_ptr_; + std::unique_ptr router_; + + std::string cluster_name_{"cluster"}; + + MsgType msg_type_{MsgType::Request}; + MessageMetadataSharedPtr metadata_; + + Tcp::ConnectionPool::UpstreamCallbacks* upstream_callbacks_{}; + NiceMock upstream_connection_; +}; + +TEST_F(SipRouterTest, Call) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + connectUpstream(); + completeRequest(); + returnResponse(); + EXPECT_CALL(callbacks_, transactionId()).WillRepeatedly(Return("test")); + destroyRouter(); +} + +TEST_F(SipRouterTest, CallWithNotify) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + + initializeMetadata(MsgType::Request, MethodType::Notify); + metadata_->setEP("10.0.0.1"); + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); + + auto& transaction_info_ptr = (*transaction_infos_)[cluster_name_]; + EXPECT_NE(nullptr, transaction_info_ptr); + std::shared_ptr upstream_request_ptr = + transaction_info_ptr->getUpstreamRequest("10.0.0.1"); + EXPECT_NE(nullptr, upstream_request_ptr); + upstream_request_ptr->resetStream(); + + transaction_info_ptr->deleteUpstreamRequest("10.0.0.1"); + upstream_request_ptr = transaction_info_ptr->getUpstreamRequest("10.0.0.1"); + EXPECT_EQ(nullptr, upstream_request_ptr); +} + +TEST_F(SipRouterTest, 
DiffRouter) { + initializeTrans(false); + initializeRouter(); + router_->metadataMatchCriteria(); + EXPECT_EQ(nullptr, router_->metadataMatchCriteria()); + initializeTransaction(); + EXPECT_EQ(router_->metadataMatchCriteria(), route_entry_.metadataMatchCriteria()); + startRequest(MsgType::Request); + + initializeRouter(); + startRequest(MsgType::Request); +} + +TEST_F(SipRouterTest, DiffRouterDiffTrans) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + + initializeRouter(); + + initializeMetadata(MsgType::Request, MethodType::Invite); + EXPECT_EQ(FilterStatus::Continue, router_->transportBegin(metadata_)); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + metadata_->setTransactionId("cluster"); + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); +} + +TEST_F(SipRouterTest, DiffDestination) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + initializeMetadata(MsgType::Request, MethodType::Register); + metadata_->setEP("10.0.0.1"); + EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); + + initializeRouter(); + initializeMetadata(MsgType::Request, MethodType::Register); + metadata_->setEP("10.0.0.1"); + metadata_->setDestination("10.0.0.1"); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); +} + +TEST_F(SipRouterTest, DiffDestinationDiffTrans) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + + initializeRouter(); + initializeMetadata(MsgType::Request, MethodType::Ack); + 
metadata_->setDestination("10.0.0.1"); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + metadata_->setTransactionId("cluster"); + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); +} + +TEST_F(SipRouterTest, DiffDestinationNoTrans) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + + initializeRouter(); + initializeMetadata(MsgType::Request, MethodType::Ack); + metadata_->setDestination("10.0.0.1"); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + metadata_->setTransactionId(""); + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); +} + +TEST_F(SipRouterTest, NoDestination) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + + initializeMetadata(MsgType::Request, MethodType::Invite, false); + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); +} + +TEST_F(SipRouterTest, CallNoRouter) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + initializeMetadata(MsgType::Request); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(nullptr)); + EXPECT_CALL(callbacks_, sendLocalReply(_, _)) + .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void { + auto& app_ex = dynamic_cast(response); + EXPECT_EQ(AppExceptionType::UnknownMethod, app_ex.type_); + EXPECT_THAT(app_ex.what(), ContainsRegex(".*no route.*")); + EXPECT_TRUE(end_stream); 
+ })); + EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); + EXPECT_EQ(1U, context_.scope().counterFromString("test.route_missing").value()); + + destroyRouterOutofRange(); +} + +TEST_F(SipRouterTest, CallNoCluster) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + initializeMetadata(MsgType::Request); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + EXPECT_CALL(context_.cluster_manager_, getThreadLocalCluster(Eq(cluster_name_))) + .WillOnce(Return(nullptr)); + EXPECT_CALL(callbacks_, sendLocalReply(_, _)) + .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void { + auto& app_ex = dynamic_cast(response); + EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_); + EXPECT_THAT(app_ex.what(), ContainsRegex(".*unknown cluster.*")); + EXPECT_TRUE(end_stream); + })); + EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); + EXPECT_EQ(1U, context_.scope().counterFromString("test.unknown_cluster").value()); + + destroyRouter(); +} + +TEST_F(SipRouterTest, ClusterMaintenanceMode) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + initializeMetadata(MsgType::Request); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); + EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, maintenanceMode()) + .WillOnce(Return(true)); + + EXPECT_CALL(callbacks_, sendLocalReply(_, _)) + .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void { + auto& app_ex = dynamic_cast(response); + EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_); + EXPECT_THAT(app_ex.what(), 
ContainsRegex(".*maintenance mode.*")); + EXPECT_TRUE(end_stream); + })); + EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); + EXPECT_EQ(1U, context_.scope().counterFromString("test.upstream_rq_maintenance_mode").value()); + destroyRouter(); +} + +TEST_F(SipRouterTest, NoHealthyHosts) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + initializeMetadata(MsgType::Request); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillOnce(ReturnRef(cluster_name_)); + EXPECT_CALL(context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) + .WillOnce(Return(absl::nullopt)); + + EXPECT_CALL(callbacks_, sendLocalReply(_, _)) + .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void { + auto& app_ex = dynamic_cast(response); + EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_); + EXPECT_THAT(app_ex.what(), ContainsRegex(".*no healthy upstream.*")); + EXPECT_TRUE(end_stream); + })); + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); + EXPECT_EQ(1U, context_.scope().counterFromString("test.no_healthy_upstream").value()); + destroyRouter(); +} + +TEST_F(SipRouterTest, NoHost) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + initializeMetadata(MsgType::Request); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillOnce(ReturnRef(cluster_name_)); + + EXPECT_CALL(context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_, host()) + .WillOnce(Return(nullptr)); + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); + destroyRouter(); +} + +TEST_F(SipRouterTest, NoNewConnection) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + 
initializeMetadata(MsgType::Request); + + EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); + EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); + EXPECT_CALL(route_entry_, clusterName()).WillOnce(ReturnRef(cluster_name_)); + + EXPECT_CALL(context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_, newConnection(_)) + .WillOnce(Return(nullptr)); + + EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_)); + destroyRouter(); +} + +TEST_F(SipRouterTest, CallWithExistingConnection) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + connectUpstream(); + completeRequest(); + returnResponse(); + metadata_->setDestination("10.0.0.1"); + router_->cleanup(); + startRequestWithExistingConnection(MsgType::Request); + destroyRouter(); +} + +TEST_F(SipRouterTest, PoolFailure) { + initializeTrans(); + initializeRouterWithCallback(); + initializeTransaction(); + startRequest(MsgType::Request); + // connectUpstream(); + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( + ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + completeRequest(); +} + +TEST_F(SipRouterTest, NewConnectionFailure) { + initializeTrans(); + initializeRouterWithCallback(); + initializeTransaction(); + EXPECT_CALL(context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.newConnectionImpl(cb); + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolReady( + upstream_connection_); + return nullptr; + })); + startRequest(MsgType::Request); +} + +TEST_F(SipRouterTest, UpstreamCloseMidResponse) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + connectUpstream(); + + 
upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose); + upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose); + // Panic: NOT_REACHED_GCOVR_EXCL_LINE + // upstream_callbacks_->onEvent(static_cast(9999)); +} + +TEST_F(SipRouterTest, RouteEntryImplBase) { + const envoy::extensions::filters::network::sip_proxy::v3alpha::Route route; + GeneralRouteEntryImpl* base = new GeneralRouteEntryImpl(route); + EXPECT_EQ("", base->clusterName()); + EXPECT_EQ(base, base->routeEntry()); + EXPECT_EQ(nullptr, base->metadataMatchCriteria()); +} + +envoy::extensions::filters::network::sip_proxy::v3alpha::RouteConfiguration +parseConfigFromYaml(const std::string& yaml) { + envoy::extensions::filters::network::sip_proxy::v3alpha::RouteConfiguration route; + TestUtility::loadFromYaml(yaml, route); + return route; +} + +TEST_F(SipRouterTest, RouteMatcher) { + + const std::string yaml = R"EOF( + name: local_route + routes: + match: + domain: pcsf-cfed.cncs.svc.cluster.local + route: + cluster: A +)EOF"; + + envoy::extensions::filters::network::sip_proxy::v3alpha::RouteConfiguration config; + TestUtility::loadFromYaml(yaml, config); + + initializeMetadata(MsgType::Request); + auto matcher_ptr = std::make_shared(config); + + // Match domain + metadata_->setDomain("", + "x-suri"); + matcher_ptr->route(*metadata_); + + // Not match domain + metadata_->setDomain( + "", + "x-suri"); + matcher_ptr->route(*metadata_); +} + +TEST_F(SipRouterTest, HandlePendingRequest) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + connectUpstream(); + completeRequest(); + + auto& transaction_info_ptr = (*transaction_infos_)[cluster_name_]; + EXPECT_NE(nullptr, transaction_info_ptr); + std::shared_ptr upstream_request_ptr = + transaction_info_ptr->getUpstreamRequest("10.0.0.1"); + EXPECT_NE(nullptr, upstream_request_ptr); + upstream_request_ptr->addIntoPendingRequest(metadata_); + // trigger full + 
upstream_request_ptr->onRequestStart(); + + for (int i = 0; i < 1000003; i++) { + upstream_request_ptr->addIntoPendingRequest(metadata_); + } + + upstream_request_ptr->resetStream(); + + // Other UpstreamRequest in definition + upstream_request_ptr->onAboveWriteBufferHighWatermark(); + upstream_request_ptr->onBelowWriteBufferLowWatermark(); +} + +TEST_F(SipRouterTest, ResponseDecoder) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + + initializeMetadata(MsgType::Response, MethodType::Ok200); + auto& transaction_info_ptr = (*transaction_infos_)[cluster_name_]; + EXPECT_NE(nullptr, transaction_info_ptr); + std::shared_ptr upstream_request_ptr = + transaction_info_ptr->getUpstreamRequest("10.0.0.1"); + EXPECT_NE(nullptr, upstream_request_ptr); + std::shared_ptr response_decoder_ptr = + std::make_shared(*upstream_request_ptr); + EXPECT_EQ(FilterStatus::Continue, response_decoder_ptr->transportBegin(metadata_)); + EXPECT_EQ(FilterStatus::Continue, response_decoder_ptr->messageBegin(metadata_)); + EXPECT_EQ(FilterStatus::Continue, response_decoder_ptr->messageEnd()); + EXPECT_EQ(FilterStatus::Continue, response_decoder_ptr->transportEnd()); + response_decoder_ptr->newDecoderEventHandler(metadata_); + + // No active trans + metadata_->setTransactionId(nullptr); + EXPECT_EQ(FilterStatus::Continue, response_decoder_ptr->transportBegin(metadata_)); + // No transid + metadata_->resetTransactionId(); + EXPECT_EQ(FilterStatus::StopIteration, response_decoder_ptr->transportBegin(metadata_)); +} + +TEST_F(SipRouterTest, TransactionInfoItem) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + + initializeMetadata(MsgType::Request); + auto& transaction_info_ptr = (*transaction_infos_)[cluster_name_]; + EXPECT_NE(nullptr, transaction_info_ptr); + std::shared_ptr upstream_request_ptr = + transaction_info_ptr->getUpstreamRequest("10.0.0.1"); + EXPECT_NE(nullptr, 
upstream_request_ptr); + + std::shared_ptr item = + std::make_shared(&callbacks_, upstream_request_ptr); + item->appendMessageList(metadata_); + item->resetTrans(); + EXPECT_NE(nullptr, item->upstreamRequest()); + EXPECT_EQ(false, item->deleted()); +} + +TEST_F(SipRouterTest, Audit) { + initializeTrans(); + initializeRouter(); + initializeTransaction(); + startRequest(MsgType::Request); + + initializeMetadata(MsgType::Request); + auto& transaction_info_ptr = (*transaction_infos_)[cluster_name_]; + EXPECT_NE(nullptr, transaction_info_ptr); + std::shared_ptr upstream_request_ptr = + transaction_info_ptr->getUpstreamRequest("10.0.0.1"); + EXPECT_NE(nullptr, upstream_request_ptr); + + std::shared_ptr item = + std::make_shared(&callbacks_, upstream_request_ptr); + std::shared_ptr itemToDelete = + std::make_shared(&callbacks_, upstream_request_ptr); + itemToDelete->toDelete(); + ThreadLocalTransactionInfo threadInfo(transaction_info_ptr, dispatcher_, + std::chrono::milliseconds(0), "", "x-suri"); + threadInfo.transaction_info_map_.emplace(cluster_name_, item); + threadInfo.transaction_info_map_.emplace("test1", itemToDelete); + threadInfo.auditTimerAction(); +} + +} // namespace Router +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sip_proxy/filters/network/test/utility.h b/contrib/sip_proxy/filters/network/test/utility.h new file mode 100644 index 0000000000000..c774d5d2e823c --- /dev/null +++ b/contrib/sip_proxy/filters/network/test/utility.h @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/common/byte_order.h" + +#include "test/common/buffer/utility.h" + +#include "absl/strings/ascii.h" +#include "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.pb.h" +#include "contrib/sip_proxy/filters/network/source/sip.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { 
+namespace NetworkFilters { +namespace SipProxy { +namespace { + +using Envoy::Buffer::addRepeated; // NOLINT(misc-unused-using-decls) +using Envoy::Buffer::addSeq; // NOLINT(misc-unused-using-decls) + +MATCHER_P2(HasAppException, t, m, "") { + if (!arg.hasAppException()) { + *result_listener << "has no exception"; + return false; + } + + if (arg.appExceptionType() != t) { + *result_listener << "has exception with type " << static_cast(arg.appExceptionType()); + return false; + } + + if (std::string(m) != arg.appExceptionMessage()) { + *result_listener << "has exception with message " << arg.appExceptionMessage(); + return false; + } + + return true; +} + +} // namespace +} // namespace SipProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/squash/filters/http/test/config_test.cc b/contrib/squash/filters/http/test/config_test.cc index 613dc5b67e9ad..82647ecf664fe 100644 --- a/contrib/squash/filters/http/test/config_test.cc +++ b/contrib/squash/filters/http/test/config_test.cc @@ -36,11 +36,12 @@ TEST(SquashFilterConfigFactoryTest, SquashFilterCorrectYaml) { cb(filter_callback); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(SquashFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.squash"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/contrib/sxg/filters/http/test/filter_test.cc b/contrib/sxg/filters/http/test/filter_test.cc index 0ec8fedcdd5fe..7853ac0590b9b 100644 --- a/contrib/sxg/filters/http/test/filter_test.cc +++ b/contrib/sxg/filters/http/test/filter_test.cc @@ -365,7 +365,7 @@ TEST_F(FilterTest, SdsDynamicGenericSecret) { NiceMock dispatcher; EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(init_manager, add(_)) diff --git a/docs/BUILD b/docs/BUILD index 51f3e2b4a1cd3..775644422374b 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -2,6 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +load("@rules_pkg//:mappings.bzl", "pkg_filegroup", "pkg_files") load("@rules_pkg//:pkg.bzl", "pkg_tar") licenses(["notice"]) # Apache 2 @@ -69,50 +70,6 @@ genrule( ], ) -pkg_tar( - name = "sphinx_base", - srcs = glob( - [ - "conf.py", - "_ext/*", - ], - ) + [":redirects"], - extension = "tar", - strip_prefix = "/docs/", -) - -pkg_tar( - name = "sphinx_root", - srcs = glob(["root/**/*"]), - extension = "tar", - strip_prefix = "/docs/root", -) - -pkg_tar( - name = "base_rst", - extension = "tar", - deps = [ - ":sphinx_base.tar", - ":sphinx_root.tar", - ], -) - -pkg_tar( - name = "google_vrp_config", - srcs = 
["//configs:google-vrp/envoy-edge.yaml"], - extension = "tar", - package_dir = "/best_practices", - strip_prefix = "/configs/configuration", -) - -pkg_tar( - name = "examples_rst", - srcs = ["//examples:files"], - extension = "tar", - package_dir = "/start/sandboxes/_include", - strip_prefix = "/examples", -) - genrule( name = "extensions_security_rst", srcs = [ @@ -132,8 +89,8 @@ genrule( name = "external_deps_rst", srcs = [ "//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations_utils.bzl", + "@envoy_api//bazel:repository_locations.bzl", + "@envoy_api//bazel:repository_locations_utils.bzl", ], outs = ["external_deps_rst.tar"], cmd = "$(location //tools/docs:generate_external_deps_rst) $@", @@ -142,8 +99,8 @@ genrule( genquery( name = "v3_proto_srcs", - expression = "labels(srcs, labels(deps, @envoy_api_canonical//:v3_protos))", - scope = ["@envoy_api_canonical//:v3_protos"], + expression = "labels(srcs, labels(deps, @envoy_api//:v3_protos))", + scope = ["@envoy_api//:v3_protos"], ) genrule( @@ -171,14 +128,54 @@ genrule( tools = ["//tools/docs:generate_api_rst"], ) +pkg_files( + name = "sphinx_base", + srcs = glob( + [ + "conf.py", + "_ext/*", + ], + ) + [":redirects"], + strip_prefix = "/docs", +) + +pkg_files( + name = "sphinx_root", + srcs = glob(["root/**/*"]), + strip_prefix = "/docs/root", +) + +# TODO(phlax): this appears unused, fix or remove +pkg_files( + name = "google_vrp_config", + srcs = ["//configs:google-vrp/envoy-edge.yaml"], + prefix = "config/best_practices", + strip_prefix = "/configs", +) + +pkg_files( + name = "examples_rst", + srcs = ["//examples:files"], + prefix = "start/sandboxes/_include", + strip_prefix = "/examples", +) + +pkg_filegroup( + name = "rst_files", + srcs = [ + ":examples_rst", + ":sphinx_base", + ":sphinx_root", + ], +) + pkg_tar( name = "rst", + srcs = [":rst_files"], extension = "tar", deps = [ ":api_rst", - ":base_rst.tar", 
":empty_protos_rst", - ":examples_rst.tar", ":extensions_security_rst", ":external_deps_rst", ], diff --git a/docs/build.sh b/docs/build.sh index 55e0462fc880e..dbcea9d91254f 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -16,14 +16,22 @@ if [[ ! $(command -v jq) ]]; then exit 1 fi +MAIN_BRANCH="refs/heads/main" RELEASE_TAG_REGEX="^refs/tags/v.*" +# default is to build html only +BUILD_TYPE=html + if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then DOCS_TAG="${AZP_BRANCH/refs\/tags\//}" export DOCS_TAG else BUILD_SHA=$(git rev-parse HEAD) export BUILD_SHA + if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then + # no need to build html, just rst + BUILD_TYPE=rst + fi fi # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. @@ -33,9 +41,30 @@ BAZEL_BUILD_OPTIONS+=( "--action_env=BUILD_SHA" "--action_env=SPHINX_SKIP_CONFIG_VALIDATION") -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:html +# Building html/rst is determined by the needs of CI but can be overridden in dev. 
+if [[ "${BUILD_TYPE}" == "html" ]] || [[ -n "${DOCS_BUILD_HTML}" ]]; then + BUILD_HTML=1 +fi +if [[ "${BUILD_TYPE}" == "rst" ]] || [[ -n "${DOCS_BUILD_RST}" ]]; then + BUILD_RST=1 +fi + +# Build html/rst +if [[ -n "${BUILD_RST}" ]]; then + bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:rst +fi +if [[ -n "${BUILD_HTML}" ]]; then + bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:html +fi [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs rm -rf "${DOCS_OUTPUT_DIR}" mkdir -p "${DOCS_OUTPUT_DIR}" -tar -xf bazel-bin/docs/html.tar -C "$DOCS_OUTPUT_DIR" + +# Save html/rst to output directory +if [[ -n "${BUILD_HTML}" ]]; then + tar -xf bazel-bin/docs/html.tar -C "$DOCS_OUTPUT_DIR" +fi +if [[ -n "${BUILD_RST}" ]]; then + gzip -c bazel-bin/docs/rst.tar > "$DOCS_OUTPUT_DIR"/envoy-docs-rst.tar.gz +fi diff --git a/docs/conf.py b/docs/conf.py index fb1b0afb11400..5b3a225281ea9 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -312,47 +312,19 @@ def _config(key): rediraffe_redirects = "envoy-redirects.txt" intersphinx_mapping = { - 'v1.5.0': ('https://www.envoyproxy.io/docs/envoy/v1.5.0', None), - 'v1.6.0': ('https://www.envoyproxy.io/docs/envoy/v1.6.0', None), - 'v1.7.0': ('https://www.envoyproxy.io/docs/envoy/v1.7.1', None), - 'v1.8.0': ('https://www.envoyproxy.io/docs/envoy/v1.8.0', None), - 'v1.9.0': ('https://www.envoyproxy.io/docs/envoy/v1.9.0', None), - 'v1.9.1': ('https://www.envoyproxy.io/docs/envoy/v1.9.1', None), - 'v1.10.0': ('https://www.envoyproxy.io/docs/envoy/v1.10.0', None), - 'v1.11.0': ('https://www.envoyproxy.io/docs/envoy/v1.11.0', None), - 'v1.11.1': ('https://www.envoyproxy.io/docs/envoy/v1.11.1', None), - 'v1.11.2': ('https://www.envoyproxy.io/docs/envoy/v1.11.2', None), - 'v1.12.0': ('https://www.envoyproxy.io/docs/envoy/v1.12.0', None), - 'v1.12.2': ('https://www.envoyproxy.io/docs/envoy/v1.12.2', None), - 'v1.12.3': ('https://www.envoyproxy.io/docs/envoy/v1.12.3', None), - 'v1.12.4': ('https://www.envoyproxy.io/docs/envoy/v1.12.4', None), - 
'v1.12.5': ('https://www.envoyproxy.io/docs/envoy/v1.12.5', None), - 'v1.12.6': ('https://www.envoyproxy.io/docs/envoy/v1.12.6', None), - 'v1.13.0': ('https://www.envoyproxy.io/docs/envoy/v1.13.0', None), - 'v1.13.1': ('https://www.envoyproxy.io/docs/envoy/v1.13.1', None), - 'v1.13.2': ('https://www.envoyproxy.io/docs/envoy/v1.13.2', None), - 'v1.13.3': ('https://www.envoyproxy.io/docs/envoy/v1.13.3', None), - 'v1.14.0': ('https://www.envoyproxy.io/docs/envoy/v1.14.0', None), - 'v1.14.2': ('https://www.envoyproxy.io/docs/envoy/v1.14.2', None), - 'v1.14.3': ('https://www.envoyproxy.io/docs/envoy/v1.14.3', None), - 'v1.14.7': ('https://www.envoyproxy.io/docs/envoy/v1.14.7', None), - 'v1.15.0': ('https://www.envoyproxy.io/docs/envoy/v1.15.0', None), - 'v1.15.4': ('https://www.envoyproxy.io/docs/envoy/v1.15.4', None), - 'v1.15.5': ('https://www.envoyproxy.io/docs/envoy/v1.15.5', None), - 'v1.16.0': ('https://www.envoyproxy.io/docs/envoy/v1.16.0', None), - 'v1.16.1': ('https://www.envoyproxy.io/docs/envoy/v1.16.1', None), - 'v1.16.2': ('https://www.envoyproxy.io/docs/envoy/v1.16.2', None), - 'v1.16.3': ('https://www.envoyproxy.io/docs/envoy/v1.16.3', None), - 'v1.16.4': ('https://www.envoyproxy.io/docs/envoy/v1.16.4', None), - 'v1.16.5': ('https://www.envoyproxy.io/docs/envoy/v1.16.5', None), - 'v1.17.0': ('https://www.envoyproxy.io/docs/envoy/v1.17.0', None), - 'v1.17.1': ('https://www.envoyproxy.io/docs/envoy/v1.17.1', None), - 'v1.17.2': ('https://www.envoyproxy.io/docs/envoy/v1.17.2', None), - 'v1.17.3': ('https://www.envoyproxy.io/docs/envoy/v1.17.3', None), - 'v1.17.4': ('https://www.envoyproxy.io/docs/envoy/v1.17.4', None), - 'v1.18.0': ('https://www.envoyproxy.io/docs/envoy/v1.18.2', None), - 'v1.18.3': ('https://www.envoyproxy.io/docs/envoy/v1.18.3', None), - 'v1.18.4': ('https://www.envoyproxy.io/docs/envoy/v1.18.4', None), - 'v1.19.0': ('https://www.envoyproxy.io/docs/envoy/v1.19.0', None), - 'v1.19.1': ('https://www.envoyproxy.io/docs/envoy/v1.19.1', None), 
+ 'v1.5': ('https://www.envoyproxy.io/docs/envoy/v1.5.0', None), + 'v1.6': ('https://www.envoyproxy.io/docs/envoy/v1.6.0', None), + 'v1.7': ('https://www.envoyproxy.io/docs/envoy/v1.7.1', None), + 'v1.8': ('https://www.envoyproxy.io/docs/envoy/v1.8.0', None), + 'v1.9': ('https://www.envoyproxy.io/docs/envoy/v1.9.1', None), + 'v1.10': ('https://www.envoyproxy.io/docs/envoy/v1.10.0', None), + 'v1.11': ('https://www.envoyproxy.io/docs/envoy/v1.11.2', None), + 'v1.12': ('https://www.envoyproxy.io/docs/envoy/v1.12.6', None), + 'v1.13': ('https://www.envoyproxy.io/docs/envoy/v1.13.3', None), + 'v1.14': ('https://www.envoyproxy.io/docs/envoy/v1.14.7', None), + 'v1.15': ('https://www.envoyproxy.io/docs/envoy/v1.15.5', None), + 'v1.16': ('https://www.envoyproxy.io/docs/envoy/v1.16.5', None), + 'v1.17': ('https://www.envoyproxy.io/docs/envoy/v1.17.4', None), + 'v1.18': ('https://www.envoyproxy.io/docs/envoy/v1.18.4', None), + 'v1.19': ('https://www.envoyproxy.io/docs/envoy/v1.19.1', None), } diff --git a/docs/publish.sh b/docs/publish.sh index 1c65cdecae50f..81f01416338bd 100755 --- a/docs/publish.sh +++ b/docs/publish.sh @@ -19,7 +19,11 @@ RELEASE_TAG_REGEX="^refs/tags/v.*" if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/"${AZP_BRANCH/refs\/tags\//}" elif [[ "$AZP_BRANCH" == "${MAIN_BRANCH}" ]]; then - PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/latest + if [[ -n "$NETLIFY_TRIGGER_URL" ]]; then + echo "Triggering netlify docs build for (${BUILD_SHA})" + curl -X POST -d "$BUILD_SHA" "$NETLIFY_TRIGGER_URL" + fi + exit 0 else echo "Ignoring docs push" exit 0 diff --git a/docs/root/_static/slow_start_aggression.svg b/docs/root/_static/slow_start_aggression.svg new file mode 100644 index 0000000000000..aac119a0b335d --- /dev/null +++ b/docs/root/_static/slow_start_aggression.svg @@ -0,0 +1,2049 @@ + + + + + + + + 2021-04-26T00:13:24.988771 + image/svg+xml + + + Matplotlib v3.4.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/root/_static/slow_start_example.svg b/docs/root/_static/slow_start_example.svg new file mode 100644 index 0000000000000..9bd88ce1401fc --- /dev/null +++ b/docs/root/_static/slow_start_example.svg @@ -0,0 +1,1053 @@ + + + + + + + + 2021-09-10T13:39:07.873353 + image/svg+xml + + + Matplotlib v3.4.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index ddfb0fe7bb0c5..d14a59db966c2 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -20,7 +20,7 @@ Common messages ../config/core/v3/socket_option.proto ../config/core/v3/udp_socket_config.proto ../config/core/v3/substitution_format_string.proto - ../extensions/common/key_value/v3/config.proto + ../config/common/key_value/v3/config.proto ../extensions/common/ratelimit/v3/ratelimit.proto ../extensions/filters/common/fault/v3/fault.proto ../extensions/network/socket_interface/v3/default_socket_interface.proto diff --git a/docs/root/api-v3/config/accesslog/accesslog.rst b/docs/root/api-v3/config/accesslog/accesslog.rst index ecd70c2f80d2d..dae49773f7881 100644 --- a/docs/root/api-v3/config/accesslog/accesslog.rst +++ b/docs/root/api-v3/config/accesslog/accesslog.rst @@ -9,4 +9,3 @@ Access loggers v3/* ../../extensions/access_loggers/*/v3/* - ../../extensions/access_loggers/*/v3alpha/* diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index 8489a765f6298..d79d3cd79c175 100644 --- a/docs/root/api-v3/config/config.rst +++ 
b/docs/root/api-v3/config/config.rst @@ -31,3 +31,5 @@ Extensions stat_sinks/stat_sinks quic/quic_extensions formatter/formatter + contrib/contrib + rbac/matchers diff --git a/docs/root/api-v3/config/contrib/contrib.rst b/docs/root/api-v3/config/contrib/contrib.rst new file mode 100644 index 0000000000000..42596047a775f --- /dev/null +++ b/docs/root/api-v3/config/contrib/contrib.rst @@ -0,0 +1,11 @@ +.. _api-v3_config_contrib: + +Contrib Extensions +================== + +.. toctree:: + :glob: + :maxdepth: 2 + + sip/sip + cryptomb/cryptomb diff --git a/docs/root/api-v3/config/contrib/cryptomb/cryptomb.rst b/docs/root/api-v3/config/contrib/cryptomb/cryptomb.rst new file mode 100644 index 0000000000000..9f56bc9c0b8ac --- /dev/null +++ b/docs/root/api-v3/config/contrib/cryptomb/cryptomb.rst @@ -0,0 +1,5 @@ +.. toctree:: + :glob: + :maxdepth: 2 + + ../../../extensions/private_key_providers/cryptomb/v3alpha/* diff --git a/docs/root/api-v3/config/contrib/sip/sip.rst b/docs/root/api-v3/config/contrib/sip/sip.rst new file mode 100644 index 0000000000000..bfc7d61c6cf43 --- /dev/null +++ b/docs/root/api-v3/config/contrib/sip/sip.rst @@ -0,0 +1,6 @@ +.. 
toctree:: + :glob: + :maxdepth: 2 + + ../../../extensions/filters/network/sip_proxy/router/v3alpha/* + ../../../extensions/filters/network/sip_proxy/v3alpha/* diff --git a/docs/root/api-v3/config/filter/http/http.rst b/docs/root/api-v3/config/filter/http/http.rst index 861a920b5a8ec..51b746e7edb6d 100644 --- a/docs/root/api-v3/config/filter/http/http.rst +++ b/docs/root/api-v3/config/filter/http/http.rst @@ -7,3 +7,5 @@ HTTP filters */empty/* ../../../extensions/filters/http/*/v3*/* + ../../../extensions/cache/*/v3*/* + diff --git a/docs/root/api-v3/config/filter/udp/udp.rst b/docs/root/api-v3/config/filter/udp/udp.rst index c430280ca06a9..880e066946e20 100644 --- a/docs/root/api-v3/config/filter/udp/udp.rst +++ b/docs/root/api-v3/config/filter/udp/udp.rst @@ -6,4 +6,3 @@ UDP listener filters :maxdepth: 2 ../../../extensions/filters/udp/*/v3/* - ../../../extensions/filters/udp/*/v3alpha/* diff --git a/docs/root/api-v3/config/rbac/matchers.rst b/docs/root/api-v3/config/rbac/matchers.rst new file mode 100644 index 0000000000000..d32ce66750b86 --- /dev/null +++ b/docs/root/api-v3/config/rbac/matchers.rst @@ -0,0 +1,8 @@ +RBAC Matchers +============= + +.. toctree:: + :glob: + :maxdepth: 2 + + matchers/matchers diff --git a/docs/root/api-v3/config/rbac/matchers/matchers.rst b/docs/root/api-v3/config/rbac/matchers/matchers.rst new file mode 100644 index 0000000000000..ce6d5d4a20dd5 --- /dev/null +++ b/docs/root/api-v3/config/rbac/matchers/matchers.rst @@ -0,0 +1,8 @@ +RBAC Matchers +============= + +.. toctree:: + :glob: + :maxdepth: 2 + + upstream/upstream diff --git a/docs/root/api-v3/config/rbac/matchers/upstream/upstream.rst b/docs/root/api-v3/config/rbac/matchers/upstream/upstream.rst new file mode 100644 index 0000000000000..7bf4de6964520 --- /dev/null +++ b/docs/root/api-v3/config/rbac/matchers/upstream/upstream.rst @@ -0,0 +1,8 @@ +Upstream Matchers +================= + +.. 
toctree:: + :glob: + :maxdepth: 2 + + ../../../../extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto diff --git a/docs/root/api-v3/config/watchdog/watchdog.rst b/docs/root/api-v3/config/watchdog/watchdog.rst index 8a8ab843cb0e2..439698b3a9439 100644 --- a/docs/root/api-v3/config/watchdog/watchdog.rst +++ b/docs/root/api-v3/config/watchdog/watchdog.rst @@ -11,5 +11,5 @@ Watchdog :glob: :maxdepth: 2 - ../../extensions/watchdog/profile_action/v3alpha/* - ../../watchdog/v3alpha/* + ../../extensions/watchdog/profile_action/v3/* + ../../watchdog/v3/* diff --git a/docs/root/api-v3/service/service.rst b/docs/root/api-v3/service/service.rst index d651856c678b7..a65686099df1f 100644 --- a/docs/root/api-v3/service/service.rst +++ b/docs/root/api-v3/service/service.rst @@ -17,3 +17,4 @@ Services ../config/tap/v3/* trace/v3/* extension/v3/* + ext_proc/v3/* diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index b051545ea9253..9d89ae70c28b8 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -16,7 +16,9 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`External Authorization Filter ` * :ref:`External Authorization Network Filter ` * :ref:`Header-To-Metadata Filter ` -* :ref:`JWT Authentication Filter ` +* :ref:`JWT Authentication Filter ` for the extracted + :ref:`header ` + and :ref:`payload ` * :ref:`Mongo Proxy Filter ` * :ref:`MySQL Proxy Filter ` * :ref:`Postgres Proxy Filter ` diff --git a/docs/root/configuration/http/http_conn_man/response_code_details.rst b/docs/root/configuration/http/http_conn_man/response_code_details.rst index 342350c6a4f8c..58acfe7b9c519 100644 --- a/docs/root/configuration/http/http_conn_man/response_code_details.rst +++ b/docs/root/configuration/http/http_conn_man/response_code_details.rst 
@@ -101,6 +101,8 @@ All http2 details are rooted at *http2.* http2.unexpected_underscore, Envoy was configured to drop requests with header keys beginning with underscores. http2.unknown.nghttp2.error, An unknown error was encountered by nghttp2 http2.violation.of.messaging.rule, The stream was in violation of a HTTP/2 messaging rule. + http2.remote_refuse, The peer refused the stream. + http2.remote_reset, The peer reset the stream. Http3 details ~~~~~~~~~~~~~ @@ -116,4 +118,6 @@ All http3 details are rooted at *http3.* http3.unexpected_underscore, Envoy was configured to drop or reject requests with header keys beginning with underscores. http3.too_many_headers, Either incoming request or response headers contained too many headers. http3.too_many_trailers, Either incoming request or response trailers contained too many entries. + http3.remote_refuse, The peer refused the stream. + http3.remote_reset, The peer reset the stream. diff --git a/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml b/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml index 00d3415149f52..05674eb5dd91a 100644 --- a/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml +++ b/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml @@ -21,7 +21,7 @@ static_resources: route: {cluster: service_protected_by_bandwidth_limit} typed_per_filter_config: envoy.filters.http.bandwidth_limit: - "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3alpha.BandwidthLimit + "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3.BandwidthLimit stat_prefix: bandwidth_limiter_custom_route enable_mode: REQUEST_AND_RESPONSE limit_kbps: 500 @@ -31,7 +31,7 @@ static_resources: http_filters: - name: envoy.filters.http.bandwidth_limit typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3alpha.BandwidthLimit + "@type": 
type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3.BandwidthLimit stat_prefix: bandwidth_limiter_default - name: envoy.filters.http.router clusters: diff --git a/docs/root/configuration/http/http_filters/admission_control_filter.rst b/docs/root/configuration/http/http_filters/admission_control_filter.rst index ac974b2f4067d..bc08bc402b81e 100644 --- a/docs/root/configuration/http/http_filters/admission_control_filter.rst +++ b/docs/root/configuration/http/http_filters/admission_control_filter.rst @@ -7,7 +7,7 @@ Admission Control The admission control filter is experimental and is currently under active development. -See the :ref:`v3 API reference ` for details on each configuration parameter. +See the :ref:`v3 API reference ` for details on each configuration parameter. Overview -------- @@ -56,11 +56,11 @@ Note that there are additional parameters that affect the rejection probability: Health check traffic does not count towards any of the filter's measurements. See the :ref:`v3 API reference -` for more +` for more details on this parameter. The definition of a successful request is a :ref:`configurable parameter -` +` for both HTTP and gRPC requests. Aggression @@ -85,7 +85,7 @@ fields can be overridden via runtime settings. 
name: envoy.filters.http.admission_control typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl enabled: default_value: true runtime_key: "admission_control.enabled" diff --git a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst index 4576e9d3ac36a..fa0b6d27b96c6 100644 --- a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst @@ -4,12 +4,12 @@ Bandwidth limit ==================== * Bandwidth limiting :ref:`architecture overview ` -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name ``envoy.filters.http.bandwidth_limit``. The HTTP Bandwidth limit filter limits the size of data flow to the max bandwidth set in the ``limit_kbps`` when the request's route, virtual host or filter chain has a -:ref:`bandwidth limit configuration `. +:ref:`bandwidth limit configuration `. If the bandwidth limit has been exhausted the filter stops further transfer until more bandwidth gets allocated according to the ``fill_interval`` (default is 50 milliseconds). If the connection buffer fills up with accumulated @@ -60,5 +60,5 @@ Runtime The HTTP bandwidth limit filter supports the following runtime settings: The bandwidth limit filter can be runtime feature flagged via the :ref:`enabled -` +` configuration field. 
diff --git a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst index 5b81d1be25226..c89ea52c59f5b 100644 --- a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst +++ b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst @@ -26,7 +26,7 @@ Configuration The filter is configured with the name *envoy.filters.http.cdn_loop*. -The `filter config `_ has two fields. +The :ref:`filter config ` has two fields. * The *cdn_id* field sets the identifier that the filter will look for within and append to the CDN-Loop header. RFC 8586 calls this field the "cdn-id"; "cdn-id" can either be a pseudonym or a diff --git a/docs/root/configuration/http/http_filters/cors_filter.rst b/docs/root/configuration/http/http_filters/cors_filter.rst index f7109ef6eaa91..5a49769fed29a 100644 --- a/docs/root/configuration/http/http_filters/cors_filter.rst +++ b/docs/root/configuration/http/http_filters/cors_filter.rst @@ -8,7 +8,7 @@ For the meaning of the headers please refer to the pages below. * https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS * https://www.w3.org/TR/cors/ -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.cors*. .. _cors-runtime: diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index 94414daa0e285..232093c7c8ee3 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -179,16 +179,17 @@ Dynamic Metadata ---------------- .. _config_http_filters_ext_authz_dynamic_metadata: -.. note:: - - The External Authorization filter emits dynamic metadata only when it is configured to use - gRPC service as the authorization server. 
+The External Authorization filter supports emitting dynamic metadata as an opaque ``google.protobuf.Struct``. -The External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct`` -*only* when the gRPC authorization server returns a :ref:`CheckResponse -` with a filled :ref:`dynamic_metadata +When using a gRPC authorization server, dynamic metadata will be emitted only when the :ref:`CheckResponse +` contains a filled :ref:`dynamic_metadata ` field. +When using an HTTP authorization server, dynamic metadata will be emitted only when there are response headers +from the authorization server that match the configured +:ref:`dynamic_metadata_from_headers `, +if set. For every response header that matches, the filter will emit dynamic metadata whose key is the name of the matched header and whose value is the value of the matched header. + Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key diff --git a/docs/root/configuration/http/http_filters/ext_proc_filter.rst b/docs/root/configuration/http/http_filters/ext_proc_filter.rst index 989413a878171..d0b5544f4e243 100644 --- a/docs/root/configuration/http/http_filters/ext_proc_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_proc_filter.rst @@ -2,7 +2,7 @@ External Processing =================== -* :ref:`Http filter v3 API reference ` +* :ref:`Http filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ext_proc* The external processing filter connects an external service, called an "external processor," @@ -12,9 +12,9 @@ and modifying the headers, body, and trailers of each message, or by returning a The protocol itself is based on a bidirectional gRPC stream. Envoy will send the external processor -:ref:`ProcessingRequest ` +:ref:`ProcessingRequest ` messages, and the processor must reply with -:ref:`ProcessingResponse ` +:ref:`ProcessingResponse ` messages. 
Configuration options are provided to control which events are sent to the processor. @@ -26,7 +26,7 @@ stream requests from the proxy. This filter is a work in progress. Most of the major bits of functionality are complete. The updated list of supported features and implementation status may -be found on the :ref:`reference page `. +be found on the :ref:`reference page `. Statistics ---------- diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index cb77f65eed621..dea25d116ed77 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -46,16 +46,3 @@ HTTP filters sxg_filter tap_filter wasm_filter - -.. TODO(toddmgreer): Remove this hack and add user-visible CacheFilter docs when CacheFilter is production-ready. -.. toctree:: - :hidden: - - ../../../api-v3/extensions/filters/http/admission_control/v3alpha/admission_control.proto - ../../../api-v3/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto - ../../../api-v3/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto - ../../../api-v3/service/ext_proc/v3alpha/external_processor.proto - ../../../api-v3/extensions/filters/http/oauth2/v3alpha/oauth.proto - ../../../api-v3/extensions/filters/http/cache/v3alpha/cache.proto - ../../../api-v3/extensions/cache/simple_http_cache/v3alpha/config.proto - ../../../api-v3/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 72ec360587ca3..78777d4e9c7b6 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -510,8 +510,6 @@ base64Escape() Encodes the input string as base64. This can be useful for escaping binary data. -.. 
_config_http_filters_lua_header_wrapper: - timestamp() ^^^^^^^^^^^ @@ -523,6 +521,8 @@ High resolution timestamp function. *format* is an optional enum parameter to in *EnvoyTimestampResolution.MILLISECOND* is supported The function returns timestamp in milliseconds since epoch by default if format is not set. +.. _config_http_filters_lua_header_wrapper: + Header object API ----------------- @@ -546,6 +546,29 @@ get() Gets a header. *key* is a string that supplies the header key. Returns a string that is the header value or nil if there is no such header. +getAtIndex() +^^^^^^^^^^^^ + +.. code-block:: lua + + headers:getAtIndex(key, index) + +Gets the header value at the given index. It can be used to fetch a specific value in case the +given header has multiple values. *key* is a string that supplies the header key and index is +an integer that supplies the position. It returns a string that is the header value or nil if +there is no such header or if there is no value at the specified index. + +getNumValues() +^^^^^^^^^^^^^^ + +.. code-block:: lua + + headers:getNumValues(key) + +Gets the number of values of a given header. It can be used to fetch the total number of values in case +the given header has multiple values. *key* is a string that supplies the header key. It returns +an integer with the value size for the given header or *0* if there is no such header. + __pairs() ^^^^^^^^^ diff --git a/docs/root/configuration/http/http_filters/oauth2_filter.rst b/docs/root/configuration/http/http_filters/oauth2_filter.rst index 0ea1b97c95d00..5b8cf97e50951 100644 --- a/docs/root/configuration/http/http_filters/oauth2_filter.rst +++ b/docs/root/configuration/http/http_filters/oauth2_filter.rst @@ -4,34 +4,34 @@ OAuth2 ====== -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.oauth2*. 
The OAuth filter's flow involves: * An unauthenticated user arrives at myapp.com, and the oauth filter redirects them to the - :ref:`authorization_endpoint ` - for login. The :ref:`client_id ` - and the :ref:`redirect_uri ` + :ref:`authorization_endpoint ` + for login. The :ref:`client_id ` + and the :ref:`redirect_uri ` are sent as query string parameters in this first redirect. * After a successful login, the authn server should be configured to redirect the user back to the - :ref:`redirect_uri ` + :ref:`redirect_uri ` provided in the query string in the first step. In the below code example, we choose /callback as the configured match path. An "authorization grant" is included in the query string for this second redirect. -* Using this new grant and the :ref:`token_secret `, +* Using this new grant and the :ref:`token_secret `, the filter then attempts to retrieve an access token from - the :ref:`token_endpoint `. The filter knows it has to do this + the :ref:`token_endpoint `. The filter knows it has to do this instead of reinitiating another login because the incoming request has a path that matches the - :ref:`redirect_path_matcher ` criteria. + :ref:`redirect_path_matcher ` criteria. * Upon receiving an access token, the filter sets cookies so that subseqeuent requests can skip the full flow. These cookies are calculated using the - :ref:`hmac_secret ` + :ref:`hmac_secret ` to assist in encoding. * The filter calls continueDecoding() to unblock the filter chain. When the authn server validates the client and returns an authorization token back to the OAuth filter, no matter what format that token is, if -:ref:`forward_bearer_token ` +:ref:`forward_bearer_token ` is set to true the filter will send over a cookie named ``BearerToken`` to the upstream. Additionally, the ``Authorization`` header will be populated with the same value. @@ -46,7 +46,7 @@ Example configuration The following is an example configuring the filter. .. 
validated-code-block:: yaml - :type-name: envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + :type-name: envoy.extensions.filters.http.oauth2.v3.OAuth2 config: token_endpoint: @@ -99,7 +99,7 @@ Below is a complete code example of how we employ the filter as one of http_filters: - name: envoy.filters.http.oauth2 typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 config: token_endpoint: cluster: oauth @@ -178,8 +178,8 @@ Below is a complete code example of how we employ the filter as one of sni: auth.example.com Finally, the following code block illustrates sample contents inside a yaml file containing both credential secrets. -Both the :ref:`token_secret ` -and the :ref:`hmac_secret ` +Both the :ref:`token_secret ` +and the :ref:`hmac_secret ` can be defined in one shared file. .. code-block:: yaml @@ -203,14 +203,14 @@ It is recommended to pair this filter with the :ref:`CSRF Filter ` +:ref:`authorization_endpoint ` provider will likely reject the incoming request, and your access cookies will not be cached to bypass future logins. The signout path will redirect the current user to '/', and clear all authentication cookies related to the HMAC validation. Consequently, the OAuth filter will then restart the full OAuth flow at the root path, sending the user to the configured auth endpoint. -:ref:`pass_through_matcher ` provides +:ref:`pass_through_matcher ` provides an interface for users to provide specific header matching criteria such that, when applicable, the OAuth flow is entirely skipped. When this occurs, the ``oauth_success`` metric is still incremented. 
diff --git a/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml b/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml index 2e30456d03d0e..ec2306721083e 100644 --- a/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml +++ b/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml @@ -18,7 +18,7 @@ static_resources: - filters: - name: envoy.filters.network.sni_dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig port_value: 443 dns_cache_config: name: dynamic_forward_proxy_cache_config diff --git a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst index 2c286686f33f5..ec7828db5c120 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst @@ -5,7 +5,7 @@ Kafka Broker filter The Apache Kafka broker filter decodes the client protocol for `Apache Kafka `_, both the requests and responses in the payload. -The message versions in `Kafka 2.4.0 `_ +The message versions in `Kafka 2.8.1 `_ are supported. 
The filter attempts not to influence the communication between client and brokers, so the messages that could not be decoded (due to Kafka client or broker running a newer version than supported by diff --git a/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst index 4a8504b7d67e3..bebb7c31aa5bf 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst @@ -6,7 +6,7 @@ Kafka Mesh filter The Apache Kafka mesh filter provides a facade for `Apache Kafka `_ producers. Produce requests sent to this filter insance can be forwarded to one of multiple clusters, depending on configured forwarding rules. Corresponding message versions from -Kafka 2.4.0 are supported. +Kafka 2.8.1 are supported. * :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.kafka_mesh*. @@ -78,7 +78,6 @@ Notes Given that this filter does its own processing of received requests, there are some changes in behaviour compared to explicit connection to a Kafka cluster: -#. Record headers are not sent upstream. #. Only ProduceRequests with version 2 are supported (what means very old producers like 0.8 are not going to be supported). #. Python producers need to set API version of at least 1.0.0, so that the produce requests they diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 1c2cab456f236..92724400ecaef 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -7,7 +7,7 @@ DNS Filter DNS Filter is under active development and should be considered alpha and not production ready. 
-* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.dns_filter* Overview @@ -24,7 +24,7 @@ will use for external resolution. Users can disable external DNS resolution by o client configuration object. The filter supports :ref:`per-filter configuration -`. +`. An Example configuration follows that illustrates how the filter can be used. Example Configuration @@ -35,7 +35,7 @@ Example Configuration listener_filters: name: envoy.filters.udp.dns_filter typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig" stat_prefix: "dns_filter_prefix" client_config: resolution_timeout: 5s @@ -131,7 +131,7 @@ Example External DnsTable Configuration listener_filters: name: "envoy.filters.udp.dns_filter" typed_config: - '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig' stat_prefix: "my_prefix" server_config: external_dns_table: diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index 57188dfde65ce..930414a929581 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -33,6 +33,7 @@ Server related statistics are rooted at *server.* with following statistics: envoy_bug_failures, Counter, Number of envoy bug failures detected in a release build. File or report the issue if this increments as this may be serious. 
static_unknown_fields, Counter, Number of messages in static configuration with unknown fields dynamic_unknown_fields, Counter, Number of messages in dynamic configuration with unknown fields + wip_protos, Counter, Number of messages and fields marked as work-in-progress being used .. _server_compilation_settings_statistics: diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index 12ae610921c09..7dacd28b323b0 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -199,9 +199,9 @@ threshold for tracking and a single overload action entry that resets streams: ... We will only track streams using >= -:math:`2^minimum_account_to_track_power_of_two` worth of allocated memory in +:math:`2^{minimum\_account\_to\_track\_power\_of\_two}` worth of allocated memory in buffers. In this case, by setting the `minimum_account_to_track_power_of_two` -to `20` we will track streams using >= 1MiB since :math:`2^20` is 1MiB. Streams +to `20` we will track streams using >= 1MiB since :math:`2^{20}` is 1MiB. Streams using >= 1MiB will be classified into 8 power of two sized buckets. Currently, the number of buckets is hardcoded to 8. For this example, the buckets are as follows: @@ -240,7 +240,7 @@ of streams that end up getting reset and to prevent the worker thread from locking up and triggering the Watchdog system. Given that there are only 8 buckets, we partition the space with a gradation of -:math:`gradation = (saturation_threshold - scaling_threshold)/8`. Hence at 85% +:math:`gradation = (saturation\_threshold - scaling\_threshold)/8`. Hence at 85% heap usage we reset streams in the last bucket e.g. those using `>= 128MiB`. At :math:`85% + 1 * gradation` heap usage we reset streams in the last two buckets e.g. 
those using `>= 64MiB`, prioritizing the streams in the last bucket since diff --git a/docs/root/faq/configuration/sni.rst b/docs/root/faq/configuration/sni.rst index 9b33302c595ec..e7bbdf1fb0b9f 100644 --- a/docs/root/faq/configuration/sni.rst +++ b/docs/root/faq/configuration/sni.rst @@ -70,8 +70,10 @@ How do I configure SNI for clusters? ==================================== For clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext `. -To derive SNI from HTTP ``host`` or ``:authority`` header, turn on +To derive SNI from a downstream HTTP header like ``host`` or ``:authority``, turn on :ref:`auto_sni ` to override the fixed SNI in -`UpstreamTlsContext`. If upstream will present certificates with the hostname in SAN, turn on +`UpstreamTlsContext`. A custom header other than the ``host`` or ``:authority`` can also be supplied using the optional +:ref:`override_auto_sni_header ` field. +If upstream will present certificates with the hostname in SAN, turn on :ref:`auto_san_validation ` too. It still needs a trust CA in validation context in ``UpstreamTlsContext`` for trust anchor. diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 72c090aac266b..89177db38b7f8 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -30,6 +30,21 @@ Connection timeouts apply to the entire HTTP connection and all streams the conn connections use the :ref:`common_http_protocol_options ` field in the Cluster's :ref:`extension_protocol_options`, keyed by `envoy.extensions.upstreams.http.v3.HttpProtocolOptions` +* The HTTP protocol :ref:`max_connection_duration ` + is defined in a generic message used by both the HTTP connection manager as well as upstream cluster + HTTP connections but is currently only implemented for the downstream connections.
The maximum + connection duration is the time after which a downstream connection will be drained and/or closed, + starting from when it first got established. If there are no active streams, the connection will be + closed. If there are any active streams, the drain sequence will kick in, and the connection will be + force-closed after the drain period. The default value of max connection duration is *0* or unlimited, + which means that the connections will never be closed due to aging. It could be helpful in scenarios + when you are running a pool of Envoy edge-proxies and would want to close a downstream connection after + some time to prevent stickiness. It could also help to better load balance the overall traffic among + this pool, especially if the size of this pool is dynamically changing. To modify the max connection + duration for downstream connections use the + :ref:`common_http_protocol_options ` + field in the HTTP connection manager configuration. + + See :ref:`below ` for other connection timeouts. Stream timeouts @@ -95,6 +110,11 @@ stream timeouts already introduced above. is sent to the downstream, which normally happens after the upstream has sent response headers. This timeout can be used with streaming endpoints to retry if the upstream fails to begin a response within the timeout. +* The route :ref:`per_try_idle_timeout ` + can be configured to ensure continued response progress of individual retry attempts (including + the first attempt). This is useful in cases where the total upstream request time is bounded + by the number of attempts multiplied by the per try timeout, but while the user wants to + ensure that individual attempts are making progress.
* The route :ref:`MaxStreamDuration proto ` can be used to override the HttpConnectionManager's :ref:`max_stream_duration ` diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index cefa72e67b807..f0c92d42a531b 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -52,10 +52,13 @@ a deployment of the form: In this case, if a client is for example using WebSocket, we want the Websocket to arrive at the upstream server functionally intact, which means it needs to traverse the HTTP/2+ hop. -This is accomplished via `Extended CONNECT (RFC8441) `_ support, +This is accomplished for HTTP/2 via `Extended CONNECT (RFC8441) `_ support, turned on by setting :ref:`allow_connect ` -true at the second layer Envoy. The -WebSocket request will be transformed into an HTTP/2+ CONNECT stream, with :protocol header +true at the second layer Envoy. For HTTP/3 there is parallel support configured by the alpha option +:ref:`allow_extended_connect ` as +there is no formal RFC yet. + +The WebSocket request will be transformed into an HTTP/2+ CONNECT stream, with :protocol header indicating the original upgrade, traverse the HTTP/2+ hop, and be downgraded back into an HTTP/1 WebSocket Upgrade. This same Upgrade-CONNECT-Upgrade transformation will be performed on any HTTP/2+ hop, with the documented flaw that the HTTP/1.1 method is always assumed to be GET. diff --git a/docs/root/intro/arch_overview/observability/access_logging.rst b/docs/root/intro/arch_overview/observability/access_logging.rst index a40cddbe465ff..7b77a4180be4b 100644 --- a/docs/root/intro/arch_overview/observability/access_logging.rst +++ b/docs/root/intro/arch_overview/observability/access_logging.rst @@ -73,6 +73,6 @@ Further reading * File :ref:`access log sink `. * gRPC :ref:`Access Log Service (ALS) ` sink. 
-* OpenTelemetry (gRPC) :ref:`LogsService ` +* OpenTelemetry (gRPC) :ref:`LogsService ` * Stdout :ref:`access log sink ` * Stderr :ref:`access log sink ` diff --git a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst index 9c53106ab146f..9e7de4ae11989 100644 --- a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst +++ b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst @@ -23,6 +23,6 @@ could combine multiple JWT requirements for the same request. The verification could be either specified inline in the filter config or fetched from remote server via HTTP/HTTPS. -The JWT Authentication filter also supports to write the payloads of the successfully verified JWT -to :ref:`Dynamic State ` so that later filters could use -it to make their own decisions based on the JWT payloads. +The JWT Authentication filter also supports writing the header and payload of the successfully +verified JWT to :ref:`Dynamic State ` so that later +filters could use it to make their own decisions based on the JWT payloads. diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index 147171f498e13..b6d93cf3c0e12 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -50,8 +50,7 @@ pool will drain the affected connection. Once a connection reaches its :ref:`max stream limit `, it will be marked as busy until a stream is available. New connections are established anytime there is a pending request without a connection that can be dispatched to (up to circuit breaker limits for -connections). HTTP/3 upstream support is currently only usable in situations where HTTP/3 is guaranteed -to work, but automatic failover to TCP is coming soon!. +connections).
Automatic protocol selection ---------------------------- @@ -69,10 +68,21 @@ then 300ms later, if a QUIC connection is not established, will also attempt to Whichever handshake succeeds will be used for the initial stream, but if both TCP and QUIC connections are established, QUIC will eventually be preferred. -Upcoming versions of HTTP/3 support will include only selecting HTTP/3 if the upstream advertises support -either via `HTTP Alternative Services `_, -`HTTPS DNS RR `_, or "QUIC hints" which -will be manually configured. This path is alpha and rapidly undergoing improvements with the goal of having +If an alternate protocol cache is configured via +:ref:`alternate_protocols_cache_options ` +then HTTP/3 connections will only be attempted to servers which +advertise HTTP/3 support either via `HTTP Alternative Services `, (eventually +the `HTTPS DNS resource record` or "QUIC hints" +which will be manually configured). +If no such advertisement exists, then HTTP/2 or HTTP/1 will be used instead. + +If no alternate protocol cache is configured, then HTTP/3 connections will be attempted to +all servers, even those which do not advertise HTTP/3. + +Further, HTTP/3 runs over QUIC (which uses UDP) and not over TCP (which HTTP/1 and HTTP/2 use). +It is not uncommon for network devices to block UDP traffic, and hence block HTTP/3. This +means that upstream HTTP/3 connection attempts might be blocked by the network and will fall +back to using HTTP/2 or HTTP/1. This path is alpha and rapidly undergoing improvements with the goal of having the default behavior result in optimal latency for internet environments, so please be patient and follow along with Envoy release notes to stay aprised of the latest and greatest changes. 
diff --git a/docs/root/intro/arch_overview/upstream/health_checking.rst b/docs/root/intro/arch_overview/upstream/health_checking.rst index 267542070ef4f..0c3e7596bd01f 100644 --- a/docs/root/intro/arch_overview/upstream/health_checking.rst +++ b/docs/root/intro/arch_overview/upstream/health_checking.rst @@ -12,10 +12,10 @@ checking along with various settings (check interval, failures required before m unhealthy, successes required before marking a host healthy, etc.): * **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. By - default, it expects a 200 response if the host is healthy. Expected response codes are + default, it expects a 200 response if the host is healthy. Expected and retriable response codes are :ref:`configurable `. The - upstream host can return 503 if it wants to immediately notify downstream hosts to no longer - forward traffic to it. + upstream host can return a non-expected or non-retriable status code (any non-200 code by default) if + it wants to immediately notify downstream hosts to no longer forward traffic to it. * **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the upstream host. It expects the byte buffer to be echoed in the response if the host is to be considered healthy. Envoy also supports connect only L3/L4 health checking. 
diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst index 36e0fddd3ca8d..de648a4b8c73f 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst @@ -15,3 +15,4 @@ Load Balancing original_dst zone_aware subsets + slow_start diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/slow_start.rst b/docs/root/intro/arch_overview/upstream/load_balancing/slow_start.rst new file mode 100644 index 0000000000000..e510f66982556 --- /dev/null +++ b/docs/root/intro/arch_overview/upstream/load_balancing/slow_start.rst @@ -0,0 +1,60 @@ +.. _arch_overview_load_balancing_slow_start: + +Slow start mode +=============== + +Slow start mode is a configuration setting in Envoy to progressively increase amount of traffic for newly added upstream endpoints. +With no slow start enabled Envoy would send a proportional amount of traffic to new upstream endpoints. +This could be undesirable for services that require warm up time to serve full production load and could result in request timeouts, loss of data and deteriorated user experience. + +Slow start mode is a mechanism that affects load balancing weight of upstream endpoints and can be configured per upstream cluster. +Currently, slow start is supported in :ref:`Round Robin ` and :ref:`Least Request ` load balancer types. + +Users can specify a :ref:`slow start window parameter` (in seconds), so that if endpoint "cluster membership duration" (amount of time since it has joined the cluster) is within the configured window, it enters slow start mode. +During slow start window, load balancing weight of a particular endpoint will be scaled with time factor, e.g.: + +.. math:: + + NewWeight = {Weight*TimeFactor}^\frac{1}{Aggression} + +where, + +.. 
math:: + + TimeFactor = \frac{max(TimeSinceStartInSeconds,1)}{SlowStartWindowInSeconds} + +As time progresses, more and more traffic would be sent to endpoint within slow start window. + +:ref:`Aggression parameter` non-linearly affects endpoint weight and represents the speed of ramp-up. +By tuning aggression parameter, one could achieve polynomial or exponential speed for traffic increase. +Below simulation demonstrates how various values for aggression affect traffic ramp-up: + +.. image:: /_static/slow_start_aggression.svg + :width: 60% + :align: center + +Whenever a slow start window duration elapses, upstream endpoint exits slow start mode and gets regular amount of traffic according to load balancing algorithm. +Its load balancing weight will no longer be scaled with runtime bias and aggression. Endpoint could also exit slow start mode in case it leaves the cluster. + +To reiterate, endpoint enters slow start mode: + * If no active healthcheck is configured per cluster, immediately if its cluster membership duration is within slow start window. + * In case an active healthcheck is configured per cluster, when its cluster membership duration is within slow start window and endpoint has passed an active healthcheck. + If endpoint does not pass an active healthcheck during entire slow start window (since it has been added to upstream cluster), then it never enters slow start mode. + +Endpoint exits slow start mode when: + * It leaves the cluster. + * Its cluster membership duration is greater than slow start window. + * It does not pass an active healthcheck configured per cluster. + Endpoint could further re-enter slow start, if it passes an active healthcheck and its creation time is within slow start window. 
+ +It is not recommended to enable slow start mode in low traffic or high number of endpoints scenarios, potential drawbacks would be: + * Endpoint starvation, where endpoint has low probability to receive a request either due to low traffic or high number of total endpoints. + * Spurious (non-gradual) increase of traffic per endpoint, whenever a starving endpoint receives a request and sufficient time has passed within slow start window, + its load balancing weight will increase non-linearly due to time factor. + +Below is an example of how the resulting load balancing weight would look for endpoints in same priority with Round Robin Loadbalancer type, slow start window of 60 seconds, no active healthcheck and 1.0 aggression. +Once endpoints E1 and E2 exit slow start mode, their load balancing weight remains constant: + +.. image:: /_static/slow_start_example.svg + :width: 60% + :align: center diff --git a/docs/root/intro/deployment_types/double_proxy.rst b/docs/root/intro/deployment_types/double_proxy.rst index cbf6cef40f4b6..dc21aaf5a23a7 100644 --- a/docs/root/intro/deployment_types/double_proxy.rst +++ b/docs/root/intro/deployment_types/double_proxy.rst @@ -22,4 +22,4 @@ Configuration template ^^^^^^^^^^^^^^^^^^^^^^ The source distribution includes an example double proxy configuration. See -:ref:`here ` for more information. +:ref:`here ` for more information. diff --git a/docs/root/intro/deployment_types/front_proxy.rst b/docs/root/intro/deployment_types/front_proxy.rst index a8a11d4177c96..efd6fa300a26a 100644 --- a/docs/root/intro/deployment_types/front_proxy.rst +++ b/docs/root/intro/deployment_types/front_proxy.rst @@ -22,4 +22,4 @@ Configuration template ^^^^^^^^^^^^^^^^^^^^^^ The source distribution includes an example front proxy configuration. See -:ref:`here ` for more information. +:ref:`here ` for more information. 
diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 72d243f0868d3..11eb0480fff0e 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -320,6 +320,14 @@ following are the command line options that Envoy supports. or count occurrences of unknown fields, in the interest of configuration processing speed. If :option:`--reject-unknown-dynamic-fields` is set to true, this flag has no effect. + .. attention:: + + In addition to not logging warnings or counting occurrences of unknown fields, setting this + option also disables counting and warnings of deprecated fields as well as work-in-progress + messages and fields. It is *strongly* recommended that this option is not set on at least a + small portion of the fleet (staging, canary, etc.) in order to monitor for unknown, + deprecated, or work-in-progress usage. + .. option:: --disable-extensions *(optional)* This flag disabled the provided list of comma-separated extension names. Disabled diff --git a/docs/root/operations/traffic_tapping.rst b/docs/root/operations/traffic_tapping.rst index 9c95d7fad5e36..164b355f93f73 100644 --- a/docs/root/operations/traffic_tapping.rst +++ b/docs/root/operations/traffic_tapping.rst @@ -113,7 +113,7 @@ analysis with tools such as `Wireshark `_ with the .. 
code-block:: bash - bazel run @envoy_api_canonical//tools:tap2pcap /some/tap/path_0.pb path_0.pcap + bazel run @envoy_api//tools:tap2pcap /some/tap/path_0.pb path_0.pcap tshark -r path_0.pcap -d "tcp.port==10000,http2" -P 1 0.000000 127.0.0.1 → 127.0.0.1 HTTP2 157 Magic, SETTINGS, WINDOW_UPDATE, HEADERS 2 0.013713 127.0.0.1 → 127.0.0.1 HTTP2 91 SETTINGS, SETTINGS, WINDOW_UPDATE diff --git a/docs/root/start/install.rst b/docs/root/start/install.rst index 30d6a17c03503..34060a73a0d69 100644 --- a/docs/root/start/install.rst +++ b/docs/root/start/install.rst @@ -252,3 +252,6 @@ The following table shows the available Docker images The ``envoy-build-ubuntu`` image does not contain a working Envoy server, but can be used for building Envoy and related containers. This image requires 4-5GB of available disk space to use. + + All the docker images are available in Docker Hub, but `its rate limit policy `_ + doesn't apply to users since the "envoyproxy" namespace is allowlisted. diff --git a/docs/root/start/quick-start/securing.rst b/docs/root/start/quick-start/securing.rst index cf9f0b558c3c3..ccfd6bd0ce060 100644 --- a/docs/root/start/quick-start/securing.rst +++ b/docs/root/start/quick-start/securing.rst @@ -12,7 +12,7 @@ Envoy also has support for transmitting and receiving generic ``TCP`` traffic wi Envoy also offers a number of other ``HTTP``-based protocols for authentication and authorization such as :ref:`JWT `, :ref:`RBAC ` -and :ref:`OAuth `. +and :ref:`OAuth `. .. 
warning:: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index bdc1e5bb7e920..15c7ed1b0699d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,130 +1,32 @@ -1.20.0 (Pending) +1.21.0 (Pending) ================ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* config: the ``--bootstrap-version`` CLI flag has been removed, Envoy has only been able to accept v3 - bootstrap configurations since 1.18.0. -* contrib: the :ref:`squash filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`kafka broker filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`RocketMQ proxy filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`Postgres proxy filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`MySQL proxy filter ` has been moved to - :ref:`contrib images `. -* dns_filter: :ref:`dns_filter ` - protobuf fields have been renumbered to restore compatibility with Envoy - 1.18, breaking compatibility with Envoy 1.19.0 and 1.19.1. The new field - numbering allows control planes supporting Envoy 1.18 to gracefully upgrade to - :ref:`dns_resolution_config `, - provided they skip over Envoy 1.19.0 and 1.19.1. - Control planes upgrading from Envoy 1.19.0 and 1.19.1 will need to - vendor the corresponding protobuf definitions to ensure that the - renumbered fields have the types expected by those releases. -* ext_authz: fixed skipping authentication when returning either a direct response or a redirect. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect`` runtime guard to false. 
- Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* config: configuration files ending in .yml now load as YAML. -* config: configuration file extensions now ignore case when deciding the file type. E.g., .JSON file load as JSON. -* config: reduced log level for "Unable to establish new stream" xDS logs to debug. The log level - for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been - retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 - seconds. -* grpc: gRPC async client can be cached and shared accross filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. -* http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ - (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior - can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. -* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request - URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed - to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` - to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard - ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled``. This runtime guard must only be set - to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request - authorization extensions may be bypassed. 
This override and its associated behavior will be decommissioned after the standard deprecation period. -* http: set the default :ref:`lazy headermap threshold ` to 3, - which defines the minimal number of headers in a request/response/trailers required for using a - dictionary in addition to the list. Setting the ``envoy.http.headermap.lazy_map_min_size`` runtime - feature to a non-negative number will override the default value. -* http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. -* listener: added the :ref:`enable_reuse_port ` - field and changed the default for reuse_port from false to true, as the feature is now well - supported on the majority of production Linux kernels in use. The default change is aware of hot - restart, as otherwise the change would not be backwards compatible between restarts. This means - that hot restarting on to a new binary will retain the default of false until the binary undergoes - a full restart. To retain the previous behavior, either explicitly set the new configuration - field to false, or set the runtime feature flag ``envoy.reloadable_features.listener_reuse_port_default_enabled`` - to false. As part of this change, the use of reuse_port for TCP listeners on both macOS and - Windows has been disabled due to suboptimal behavior. See the field documentation for more - information. -* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update. -* quic: enables IETF connection migration. This feature requires stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. 
- Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. -* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. -* aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentaion `_. -* cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false. -* cluster: finish cluster warming even if hosts are removed before health check initialization. This only affected clusters with :ref:`ignore_health_on_host_removal `. -* compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. -* dynamic forward proxy: fixing a validation bug where san and sni checks were not applied setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. -* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. -* ext_authz: the network ext_authz filter now correctly sets dynamic metdata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. -* hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. 
-* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. -* listener: fixed an issue on Windows where connections are not handled by all worker threads. -* xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. +* listener: fixed the crash when updating listeners that do not bind to port. +* thrift_proxy: fix the thrift_proxy connection manager to correctly report success/error response metrics when performing :ref:`payload passthrough `. Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` -* http: removed ``envoy.reloadable_features.http_upstream_wait_connect_response`` runtime guard and legacy code paths. -* http: removed ``envoy.reloadable_features.allow_preconnect`` runtime guard and legacy code paths. -* listener: removed ``envoy.reloadable_features.disable_tls_inspector_injection`` runtime guard and legacy code paths. -* ocsp: removed ``envoy.reloadable_features.check_ocsp_policy deprecation`` runtime guard and legacy code paths. -* ocsp: removed ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs deprecation`` and legacy code paths. -* quic: removed ``envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing`` runtime guard. +* http: removed ``envoy.reloadable_features.return_502_for_upstream_protocol_errors``. Envoy will always return 502 code upon encountering upstream protocol error. +* http: removed ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` and legacy code paths. New Features ------------ -* access_log: added :ref:`METADATA` token to handle all types of metadata (DYNAMIC, CLUSTER, ROUTE). 
-* bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. -* contrib: added new :ref:`contrib images ` which contain contrib extensions. -* grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. -* http: added :ref:`string_match ` in the header matcher. -* http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. -* http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. -* http: sanitizing the referer header as documented :ref:`here `. This feature can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.sanitize_http_header_referer`` to false. -* jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. -* jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. -* listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. -* matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. -* overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. -* rbac: added :ref:`destination_port_range ` for matching range of destination ports. -* route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. -* sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. 
-* thrift_proxy: added support for :ref:`mirroring requests `. +* ext_authz: added :ref:`query_parameters_to_set ` and :ref:`query_parameters_to_remove ` for adding and removing query string parameters when using a gRPC authorization server. +* http: added support for :ref:`retriable health check status codes `. Deprecated ---------- - -* api: the :ref:`matcher ` field has been deprecated in favor of - :ref:`matcher ` in order to break a build dependency. -* cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. -* http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, - :ref:`prefix_match `, :ref:`suffix_match ` and - :ref:`contains_match ` are deprecated by :ref:`string_match `. -* listener: :ref:`reuse_port ` has been - deprecated in favor of :ref:`enable_reuse_port `. - At the same time, the default has been changed from false to true. See above for more information. diff --git a/docs/root/version_history/v1.1.0.rst b/docs/root/version_history/v1.1.0.rst index 4ad2763e52a4b..a01703e61c655 100644 --- a/docs/root/version_history/v1.1.0.rst +++ b/docs/root/version_history/v1.1.0.rst @@ -6,26 +6,26 @@ Changes * Switch from Jannson to RapidJSON for our JSON library (allowing for a configuration schema in 1.2.0). -* Upgrade :ref:`recommended version ` of various other libraries. +* Upgrade :ref:`recommended version ` of various other libraries. * Configurable DNS refresh rate for DNS service discovery types. * Upstream circuit breaker configuration can be :ref:`overridden via runtime - `. -* :ref:`Zone aware routing support `. + `. +* :ref:`Zone aware routing support `. * Generic header matching routing rule. * HTTP/2 graceful connection draining (double GOAWAY). -* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS +* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS feature). -* Initial release of the :ref:`fault injection HTTP filter `. 
-* HTTP :ref:`rate limit filter ` enhancements (note that the +* Initial release of the :ref:`fault injection HTTP filter `. +* HTTP :ref:`rate limit filter ` enhancements (note that the configuration for HTTP rate limiting is going to be overhauled in 1.2.0). -* Added :ref:`refused-stream retry policy `. -* Multiple :ref:`priority queues ` for upstream clusters +* Added :ref:`refused-stream retry policy `. +* Multiple :ref:`priority queues ` for upstream clusters (configurable on a per route basis, with separate connection pools, circuit breakers, etc.). -* Added max connection circuit breaking to the :ref:`TCP proxy filter `. -* Added :ref:`CLI ` options for setting the logging file flush interval as well +* Added max connection circuit breaking to the :ref:`TCP proxy filter `. +* Added :ref:`CLI ` options for setting the logging file flush interval as well as the drain/shutdown time during hot restart. * A very large number of performance enhancements for core HTTP/TCP proxy flows as well as a few new configuration flags to allow disabling expensive features if they are not needed (specifically request ID generation and dynamic response code stats). -* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. +* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. * Lots of other small fixes and enhancements not listed. diff --git a/docs/root/version_history/v1.10.0.rst b/docs/root/version_history/v1.10.0.rst index b8616a86a7c50..da7f3ee9baa06 100644 --- a/docs/root/version_history/v1.10.0.rst +++ b/docs/root/version_history/v1.10.0.rst @@ -5,97 +5,97 @@ Changes ------- * access log: added a new flag for upstream retry count exceeded. -* access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. +* access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. * access log: added a new flag for stream idle timeout. 
-* access log: added a new field for upstream transport failure reason in :ref:`file access logger ` and - :ref:`gRPC access logger ` for HTTP access logs. +* access log: added a new field for upstream transport failure reason in :ref:`file access logger ` and + :ref:`gRPC access logger ` for HTTP access logs. * access log: added new fields for downstream x509 information (URI sans and subject) to file and gRPC access logger. * admin: the admin server can now be accessed via HTTP/2 (prior knowledge). * admin: changed HTTP response status code from 400 to 405 when attempting to GET a POST-only route (such as /quitquitquit). * buffer: fix vulnerabilities when allocation fails. * build: releases are built with GCC-7 and linked with LLD. -* build: dev docker images :ref:`have been split ` from tagged images for easier +* build: dev docker images :ref:`have been split ` from tagged images for easier discoverability in Docker Hub. Additionally, we now build images for point releases. * config: added support of using google.protobuf.Any in opaque configs for extensions. * config: logging warnings when deprecated fields are in use. * config: removed deprecated --v2-config-only from command line config. -* config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. -* config: removed the deprecated_v1 config option from :ref:`ring hash `. -* config: removed REST_LEGACY as a valid :ref:`ApiType `. +* config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. +* config: removed the deprecated_v1 config option from :ref:`ring hash `. +* config: removed REST_LEGACY as a valid :ref:`ApiType `. * config: finish cluster warming only when a named response i.e. ClusterLoadAssignment associated to the cluster being warmed comes in the EDS response. This is a behavioural change from the current implementation where warming of cluster completes on missing load assignments also. 
* config: use Envoy cpuset size to set the default number or worker threads if :option:`--cpuset-threads` is enabled. -* config: added support for :ref:`initial_fetch_timeout `. The timeout is disabled by default. -* cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. +* config: added support for :ref:`initial_fetch_timeout `. The timeout is disabled by default. +* cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. * csrf: added * ext_authz: added support for buffering request body. * ext_authz: migrated from v2alpha to v2 and improved docs. * ext_authz: added a configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * ext_authz: migrated from v2alpha to v2 and improved the documentation. * ext_authz: authorization request and response configuration has been separated into two distinct objects: :ref:`authorization request - ` and :ref:`authorization response - `. In addition, :ref:`client headers - ` and :ref:`upstream headers - ` replaces the previous *allowed_authorization_headers* object. - All the control header lists now support :ref:`string matcher ` instead of standard string. + ` and :ref:`authorization response + `. In addition, :ref:`client headers + ` and :ref:`upstream headers + ` replaces the previous *allowed_authorization_headers* object. + All the control header lists now support :ref:`string matcher ` instead of standard string. * fault: added the :ref:`max_active_faults - ` setting, as well as - :ref:`statistics ` for the number of active faults + ` setting, as well as + :ref:`statistics ` for the number of active faults and the number of faults the overflowed. * fault: added :ref:`response rate limit - ` fault injection. + ` fault injection. 
* fault: added :ref:`HTTP header fault configuration - ` to the HTTP fault filter. + ` to the HTTP fault filter. * governance: extending Envoy deprecation policy from 1 release (0-3 months) to 2 releases (3-6 months). -* health check: expected response codes in http health checks are now :ref:`configurable `. +* health check: expected response codes in http health checks are now :ref:`configurable `. * http: added new grpc_http1_reverse_bridge filter for converting gRPC requests into HTTP/1.1 requests. * http: fixed a bug where Content-Length:0 was added to HTTP/1 204 responses. -* http: added :ref:`max request headers size `. The default behaviour is unchanged. +* http: added :ref:`max request headers size `. The default behaviour is unchanged. * http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data. * http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. Default implementation is a no-op. -* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. -* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy ` for more details. +* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. +* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy ` for more details. * performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). -* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. +* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. * ratelimit: removed deprecated rate limit configuration from bootstrap. 
-* redis: added :ref:`hashtagging ` to guarantee a given key's upstream. -* redis: added :ref:`latency stats ` for commands. -* redis: added :ref:`success and error stats ` for commands. +* redis: added :ref:`hashtagging ` to guarantee a given key's upstream. +* redis: added :ref:`latency stats ` for commands. +* redis: added :ref:`success and error stats ` for commands. * redis: migrate hash function for host selection to `MurmurHash2 `_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. -* redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. -* router: added ability to configure a :ref:`retry policy ` at the +* redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. +* router: added ability to configure a :ref:`retry policy ` at the virtual host level. * router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` -* router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. -* router: added per-route configuration of :ref:`internal redirects `. +* router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. +* router: added per-route configuration of :ref:`internal redirects `. * router: removed deprecated route-action level headers_to_add/remove. -* router: made :ref:`max retries header ` take precedence over the number of retries in route and virtual host retry policies. -* router: added support for prefix wildcards in :ref:`virtual host domains ` +* router: made :ref:`max retries header ` take precedence over the number of retries in route and virtual host retry policies. 
+* router: added support for prefix wildcards in :ref:`virtual host domains ` * stats: added support for histograms in prometheus * stats: added usedonly flag to prometheus stats to only output metrics which have been updated at least once. * stats: added gauges tracking remaining resources before circuit breakers open. -* tap: added new alpha :ref:`HTTP tap filter `. +* tap: added new alpha :ref:`HTTP tap filter `. * tls: enabled TLS 1.3 on the server-side (non-FIPS builds). -* upstream: add hash_function to specify the hash function for :ref:`ring hash ` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. -* upstream: added :ref:`degraded health value ` which allows +* upstream: add hash_function to specify the hash function for :ref:`ring hash ` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. +* upstream: added :ref:`degraded health value ` which allows routing to certain hosts only when there are insufficient healthy hosts available. -* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type `. -* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. -* tracing: added :ref:`verbose ` to support logging annotations on spans. -* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size ` config parameter to strictly bound the ring size. +* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type `. +* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. +* tracing: added :ref:`verbose ` to support logging annotations on spans. 
+* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size ` config parameter to strictly bound the ring size. * zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). - Refer to :ref:`ZooKeeper proxy ` for more details. + Refer to :ref:`ZooKeeper proxy ` for more details. * upstream: added configuration option to select any host when the fallback policy fails. * upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. Deprecated ---------- -* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service ` is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. +* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service ` is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * Use of ``enabled`` in ``CorsPolicy``, found in - :ref:`route.proto `. + :ref:`route.proto `. Set the ``filter_enabled`` field instead. * Use of the ``type`` field in the ``FaultDelay`` message (found in - :ref:`fault.proto `) + :ref:`fault.proto `) has been deprecated. It was never used and setting it has no effect. It will be removed in the following release. diff --git a/docs/root/version_history/v1.11.0.rst b/docs/root/version_history/v1.11.0.rst index 78c9dce6d7c66..b59b25d887632 100644 --- a/docs/root/version_history/v1.11.0.rst +++ b/docs/root/version_history/v1.11.0.rst @@ -6,115 +6,115 @@ Changes * access log: added a new field for downstream TLS session ID to file and gRPC access logger. * access log: added a new field for route name to file and gRPC access logger. -* access log: added a new field for response code details in :ref:`file access logger ` and :ref:`gRPC access logger `. 
-* access log: added several new variables for exposing information about the downstream TLS connection to :ref:`file access logger ` and :ref:`gRPC access logger `. +* access log: added a new field for response code details in :ref:`file access logger ` and :ref:`gRPC access logger `. +* access log: added several new variables for exposing information about the downstream TLS connection to :ref:`file access logger ` and :ref:`gRPC access logger `. * access log: added a new flag for request rejected due to failed strict header check. -* admin: the administration interface now includes a :ref:`/ready endpoint ` for easier readiness checks. -* admin: extend :ref:`/runtime_modify endpoint ` to support parameters within the request body. -* admin: the :ref:`/listener endpoint ` now returns :ref:`listeners.proto ` which includes listener names and ports. +* admin: the administration interface now includes a :ref:`/ready endpoint ` for easier readiness checks. +* admin: extend :ref:`/runtime_modify endpoint ` to support parameters within the request body. +* admin: the :ref:`/listener endpoint ` now returns :ref:`listeners.proto ` which includes listener names and ports. * admin: added host priority to :http:get:`/clusters` and :http:get:`/clusters?format=json` endpoint response -* admin: the :ref:`/clusters endpoint ` now shows hostname +* admin: the :ref:`/clusters endpoint ` now shows hostname for each host, useful for DNS based clusters. * api: track and report requests issued since last load report. * build: releases are built with Clang and linked with LLD. -* config: added :ref:`stats_server_version_override ` in bootstrap, that can be used to override :ref:`server.version statistic `. 
-* control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` +* config: added :ref:`stats_server_version_override ` in bootstrap, that can be used to override :ref:`server.version statistic `. +* control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` * csrf: added support for allowlisting additional source origins. * dns: added support for getting DNS record TTL which is used by STRICT_DNS/LOGICAL_DNS cluster as DNS refresh rate. -* dubbo_proxy: support the :ref:`dubbo proxy filter `. +* dubbo_proxy: support the :ref:`dubbo proxy filter `. * dynamo_request_parser: adding support for transactions. Adds check for new types of dynamodb operations (TransactWriteItems, TransactGetItems) and awareness for new types of dynamodb errors (IdempotentParameterMismatchException, TransactionCanceledException, TransactionInProgressException). -* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. +* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. * eds: removed max limit for ``load_balancing_weight``. -* event: added :ref:`loop duration and poll delay statistics `. +* event: added :ref:`loop duration and poll delay statistics `. * ext_authz: added a ``x-envoy-auth-partial-body`` metadata header set to ``false|true`` indicating if there is a partial body sent in the authorization request message. * ext_authz: added configurable status code that allows customizing HTTP responses on filter check status errors. * ext_authz: added option to ``ext_authz`` that allows the filter clearing route cache. * grpc-json: added support for :ref:`auto mapping - `. -* health check: added :ref:`initial jitter ` to add jitter to the first health check in order to prevent thundering herd on Envoy startup. + `. 
+* health check: added :ref:`initial jitter ` to add jitter to the first health check in order to prevent thundering herd on Envoy startup. * hot restart: stats are no longer shared between hot restart parent/child via shared memory, but rather by RPC. Hot restart version incremented to 11. * http: added the ability to pass a URL encoded PEM encoded peer certificate chain in the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. * http: fixed a bug where large unbufferable responses were not tracked in stats and logs correctly. * http: fixed a crashing bug where gRPC local replies would cause segfaults when upstream access logging was on. -* http: mitigated a race condition with the :ref:`delayed_close_timeout ` where it could trigger while actively flushing a pending write buffer for a downstream connection. -* http: added support for :ref:`preserve_external_request_id ` that represents whether the x-request-id should not be reset on edge entry inside mesh +* http: mitigated a race condition with the :ref:`delayed_close_timeout ` where it could trigger while actively flushing a pending write buffer for a downstream connection. +* http: added support for :ref:`preserve_external_request_id ` that represents whether the x-request-id should not be reset on edge entry inside mesh * http: changed ``sendLocalReply`` to send percent-encoded ``GrpcMessage``. -* http: added a :ref:`header_prefix ` configuration option to allow Envoy to send and process x-custom- prefixed headers rather than x-envoy. -* http: added :ref:`dynamic forward proxy ` support. +* http: added a :ref:`header_prefix ` configuration option to allow Envoy to send and process x-custom- prefixed headers rather than x-envoy. +* http: added :ref:`dynamic forward proxy ` support. * http: tracking the active stream and dumping state in Envoy crash handlers. 
This can be disabled by building with ``--define disable_object_dump_on_signal_trace=disabled`` * jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123`` -* listener: added :ref:`source IP ` - and :ref:`source port ` filter +* listener: added :ref:`source IP ` + and :ref:`source port ` filter chain matching. * lua: exposed functions to Lua to verify digital signature. -* original_src filter: added the :ref:`filter `. -* outlier_detector: added configuration :ref:`outlier_detection.split_external_local_origin_errors ` to distinguish locally and externally generated errors. See :ref:`arch_overview_outlier_detection` for full details. +* original_src filter: added the :ref:`filter `. +* outlier_detector: added configuration :ref:`outlier_detection.split_external_local_origin_errors ` to distinguish locally and externally generated errors. See :ref:`arch_overview_outlier_detection` for full details. * rbac: migrated from v2alpha to v2. * redis: add support for Redis cluster custom cluster type. * redis: automatically route commands using cluster slots for Redis cluster. -* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. -* redis: added :ref:`request mirror policy ` to enable shadow traffic and/or dual writes. +* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. +* redis: added :ref:`request mirror policy ` to enable shadow traffic and/or dual writes. * redis: add support for zpopmax and zpopmin commands. * redis: added - :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and - :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. 
-* redis: added auth support :ref:`downstream_auth_password ` for downstream client authentication, and :ref:`auth_password ` to configure authentication passwords for upstream server clusters. -* retry: added a retry predicate that :ref:`rejects canary hosts. ` -* router: add support for configuring a :ref:`gRPC timeout offset ` on incoming requests. -* router: added ability to control retry back-off intervals via :ref:`retry policy `. -* router: added ability to issue a hedged retry in response to a per try timeout via a :ref:`hedge policy `. + :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and + :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. +* redis: added auth support :ref:`downstream_auth_password ` for downstream client authentication, and :ref:`auth_password ` to configure authentication passwords for upstream server clusters. +* retry: added a retry predicate that :ref:`rejects canary hosts. ` +* router: add support for configuring a :ref:`gRPC timeout offset ` on incoming requests. +* router: added ability to control retry back-off intervals via :ref:`retry policy `. +* router: added ability to issue a hedged retry in response to a per try timeout via a :ref:`hedge policy `. * router: added a route name field to each http route in route.Route list * router: added several new variables for exposing information about the downstream TLS connection via :ref:`header - formatters `. + formatters `. * router: per try timeouts will no longer start before the downstream request has been received in full by the router.This ensures that the per try timeout does not account for slow downstreams and that will not start before the global timeout. 
-* router: added :ref:`RouteAction's auto_host_rewrite_header ` to allow upstream host header substitution with some other header's value +* router: added :ref:`RouteAction's auto_host_rewrite_header ` to allow upstream host header substitution with some other header's value * router: added support for UPSTREAM_REMOTE_ADDRESS :ref:`header formatter - `. + `. * router: add ability to reject a request that includes invalid values for - headers configured in :ref:`strict_check_headers ` + headers configured in :ref:`strict_check_headers ` * runtime: added support for :ref:`flexible layering configuration - `. + `. * runtime: added support for statically :ref:`specifying the runtime in the bootstrap configuration - `. -* runtime: :ref:`Runtime Discovery Service (RTDS) ` support added to layered runtime configuration. -* sandbox: added :ref:`CSRF sandbox `. + `. +* runtime: :ref:`Runtime Discovery Service (RTDS) ` support added to layered runtime configuration. +* sandbox: added :ref:`CSRF sandbox `. * server: ``--define manual_stamp=manual_stamp`` was added to allow server stamping outside of binary rules. more info in the `bazel docs `_. -* server: added :ref:`server state ` statistic. -* server: added :ref:`initialization_time_ms ` statistic. -* subset: added :ref:`list_as_any ` option to +* server: added :ref:`server state ` statistic. +* server: added :ref:`initialization_time_ms ` statistic. +* subset: added :ref:`list_as_any ` option to the subset lb which allows matching metadata against any of the values in a list value on the endpoints. -* tools: added :repo:`proto ` support for :ref:`router check tool ` tests. +* tools: added :repo:`proto ` support for :ref:`router check tool ` tests. * tracing: add trace sampling configuration to the route, to override the route level. -* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. +* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 
* upstream: an EDS management server can now force removal of a host that is still passing active health checking by first marking the host as failed via EDS health check and subsequently removing it in a future update. This is a mechanism to work around a race condition in which an EDS implementation may remove a host before it has stopped passing active HC, thus causing the host to become stranded until a future update. -* upstream: added :ref:`an option ` +* upstream: added :ref:`an option ` that allows ignoring new hosts for the purpose of load balancing calculations until they have been health checked for the first time. * upstream: added runtime error checking to prevent setting dns type to STRICT_DNS or LOGICAL_DNS when custom resolver name is specified. -* upstream: added possibility to override fallback_policy per specific selector in :ref:`subset load balancer `. -* upstream: the :ref:`logical DNS cluster ` now +* upstream: added possibility to override fallback_policy per specific selector in :ref:`subset load balancer `. +* upstream: the :ref:`logical DNS cluster ` now displays the current resolved IP address in admin output instead of 0.0.0.0. Deprecated ---------- * The --max-stats and --max-obj-name-len flags no longer has any effect. -* Use of :ref:`cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. -* Use of :ref:`catch_all_cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. -* Use of json based schema in router check tool tests. The tests should follow validation :repo:`schema `. -* Use of the v1 style route configuration for the :ref:`TCP proxy filter ` - is now fully replaced with listener :ref:`filter chain matching `. +* Use of :ref:`cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. +* Use of :ref:`catch_all_cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. 
+* Use of json based schema in router check tool tests. The tests should follow validation :repo:`schema `. +* Use of the v1 style route configuration for the :ref:`TCP proxy filter ` + is now fully replaced with listener :ref:`filter chain matching `. Use this instead. -* Use of :ref:`runtime ` in :ref:`Bootstrap - `. Use :ref:`layered_runtime - ` instead. +* Use of :ref:`runtime ` in :ref:`Bootstrap + `. Use :ref:`layered_runtime + ` instead. * Specifying "deprecated_v1: true" in HTTP and network filter configuration to allow loading JSON configuration is now deprecated and will be removed in a following release. Update any custom filters to use protobuf configuration. A struct can be used for a mostly 1:1 conversion if needed. diff --git a/docs/root/version_history/v1.11.1.rst b/docs/root/version_history/v1.11.1.rst index 53176eac2b298..7d87049d2049f 100644 --- a/docs/root/version_history/v1.11.1.rst +++ b/docs/root/version_history/v1.11.1.rst @@ -5,15 +5,15 @@ Changes ------- * http: added mitigation of client initiated attacks that result in flooding of the downstream HTTP/2 connections. Those attacks can be logged at the "warning" level when the runtime feature ``http.connection_manager.log_flood_exception`` is enabled. The runtime setting defaults to disabled to avoid log spam when under attack. -* http: added :ref:`inbound_empty_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_consecutive_inbound_frames_with_empty_payload`` overrides :ref:`max_consecutive_inbound_frames_with_empty_payload setting `. Large override value (i.e. 2147483647) effectively disables mitigation of inbound frames with empty payload. 
-* http: added :ref:`inbound_priority_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound PRIORITY frames. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_priority_frames_per_stream`` overrides :ref:`max_inbound_priority_frames_per_stream setting `. Large override value effectively disables flood mitigation of inbound PRIORITY frames. -* http: added :ref:`inbound_window_update_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound WINDOW_UPDATE frames. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_window_update_frames_per_data_frame_sent`` overrides :ref:`max_inbound_window_update_frames_per_data_frame_sent setting `. Large override value effectively disables flood mitigation of inbound WINDOW_UPDATE frames. -* http: added :ref:`outbound_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit. The limit is configured by setting the :ref:`max_outbound_frames config setting ` - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_frames`` overrides :ref:`max_outbound_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of all types. -* http: added :ref:`outbound_control_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit for PING, SETTINGS and RST_STREAM frames. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `. 
- Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_control_frames`` overrides :ref:`max_outbound_control_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of types PING, SETTINGS and RST_STREAM. -* http: enabled strict validation of HTTP/2 messaging. Previous behavior can be restored using :ref:`stream_error_on_invalid_http_messaging config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.stream_error_on_invalid_http_messaging`` overrides :ref:`stream_error_on_invalid_http_messaging config setting `. +* http: added :ref:`inbound_empty_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_consecutive_inbound_frames_with_empty_payload`` overrides :ref:`max_consecutive_inbound_frames_with_empty_payload setting `. Large override value (i.e. 2147483647) effectively disables mitigation of inbound frames with empty payload. +* http: added :ref:`inbound_priority_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound PRIORITY frames. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_priority_frames_per_stream`` overrides :ref:`max_inbound_priority_frames_per_stream setting `. Large override value effectively disables flood mitigation of inbound PRIORITY frames. 
+* http: added :ref:`inbound_window_update_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound WINDOW_UPDATE frames. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_window_update_frames_per_data_frame_sent`` overrides :ref:`max_inbound_window_update_frames_per_data_frame_sent setting `. Large override value effectively disables flood mitigation of inbound WINDOW_UPDATE frames. +* http: added :ref:`outbound_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit. The limit is configured by setting the :ref:`max_outbound_frames config setting ` + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_frames`` overrides :ref:`max_outbound_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of all types. +* http: added :ref:`outbound_control_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit for PING, SETTINGS and RST_STREAM frames. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_control_frames`` overrides :ref:`max_outbound_control_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of types PING, SETTINGS and RST_STREAM. +* http: enabled strict validation of HTTP/2 messaging. Previous behavior can be restored using :ref:`stream_error_on_invalid_http_messaging config setting `. 
+ Runtime feature ``envoy.reloadable_features.http2_protocol_options.stream_error_on_invalid_http_messaging`` overrides :ref:`stream_error_on_invalid_http_messaging config setting `. diff --git a/docs/root/version_history/v1.11.2.rst b/docs/root/version_history/v1.11.2.rst index 77f6b40f118d9..981c321333593 100644 --- a/docs/root/version_history/v1.11.2.rst +++ b/docs/root/version_history/v1.11.2.rst @@ -5,17 +5,17 @@ Changes ------- * http: fixed CVE-2019-15226 by adding a cached byte size in HeaderMap. -* http: added :ref:`max headers count ` for http connections. The default limit is 100. -* upstream: runtime feature `envoy.reloadable_features.max_response_headers_count` overrides the default limit for upstream :ref:`max headers count ` -* http: added :ref:`common_http_protocol_options ` - Runtime feature `envoy.reloadable_features.max_request_headers_count` overrides the default limit for downstream :ref:`max headers count ` +* http: added :ref:`max headers count ` for http connections. The default limit is 100. +* upstream: runtime feature `envoy.reloadable_features.max_response_headers_count` overrides the default limit for upstream :ref:`max headers count ` +* http: added :ref:`common_http_protocol_options ` + Runtime feature `envoy.reloadable_features.max_request_headers_count` overrides the default limit for downstream :ref:`max headers count ` * regex: backported safe regex matcher fix for CVE-2019-15225. Deprecated ---------- * Use of :ref:`idle_timeout - ` + ` is deprecated. Use :ref:`common_http_protocol_options - ` + ` instead. diff --git a/docs/root/version_history/v1.12.0.rst b/docs/root/version_history/v1.12.0.rst index ef1d0050194a7..159d4c738bf7e 100644 --- a/docs/root/version_history/v1.12.0.rst +++ b/docs/root/version_history/v1.12.0.rst @@ -4,86 +4,86 @@ Changes ------- -* access log: added a new flag for :ref:`downstream protocol error `. -* access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. 
Defaults to 16KB buffer and flushing every 1 second. -* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. -* access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. -* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. -* admin: added ability to configure listener :ref:`socket options `. -* admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. -* admin: added support for :ref:`draining ` listeners via admin interface. +* access log: added a new flag for :ref:`downstream protocol error `. +* access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second. +* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. +* access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. +* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. +* admin: added ability to configure listener :ref:`socket options `. +* admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. +* admin: added support for :ref:`draining ` listeners via admin interface. * admin: added :http:get:`/stats/recentlookups`, :http:post:`/stats/recentlookups/clear`, :http:post:`/stats/recentlookups/disable`, and :http:post:`/stats/recentlookups/enable` endpoints. -* api: added :ref:`set_node_on_first_message_only ` option to omit the node identifier from the subsequent discovery requests on the same stream. +* api: added :ref:`set_node_on_first_message_only ` option to omit the node identifier from the subsequent discovery requests on the same stream. 
* buffer filter: now populates content-length header if not present. This behavior can be temporarily disabled using the runtime feature ``envoy.reloadable_features.buffer_filter_populate_content_length``. * build: official released binary is now PIE so it can be run with ASLR. -* config: added support for :ref:`delta xDS ` (including ADS) delivery. +* config: added support for :ref:`delta xDS ` (including ADS) delivery. * config: enforcing that terminal filters (e.g. HttpConnectionManager for L4, router for L7) be the last in their respective filter chains. -* config: added access log :ref:`extension filter `. +* config: added access log :ref:`extension filter `. * config: added support for :option:`--reject-unknown-dynamic-fields`, providing independent control over whether unknown fields are rejected in static and dynamic configuration. By default, unknown fields in static configuration are rejected and are allowed in dynamic configuration. Warnings are logged for the first use of any unknown field and these occurrences are counted in the - :ref:`server.static_unknown_fields ` and :ref:`server.dynamic_unknown_fields - ` statistics. + :ref:`server.static_unknown_fields ` and :ref:`server.dynamic_unknown_fields + ` statistics. * config: added async data access for local and remote data sources. -* config: changed the default value of :ref:`initial_fetch_timeout ` from 0s to 15s. This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. Refer to :ref:`initialization process ` for more details. -* config: added stat :ref:`init_fetch_timeout `. -* config: tls_context in Cluster and FilterChain are deprecated in favor of transport socket. See :ref:`deprecated documentation ` for more information. +* config: changed the default value of :ref:`initial_fetch_timeout ` from 0s to 15s. 
This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. Refer to :ref:`initialization process ` for more details. +* config: added stat :ref:`init_fetch_timeout `. +* config: tls_context in Cluster and FilterChain are deprecated in favor of transport socket. See :ref:`deprecated documentation ` for more information. * csrf: added PATCH to supported methods. -* dns: added support for configuring :ref:`dns_failure_refresh_rate ` to set the DNS refresh rate during failures. -* ext_authz: added :ref:`configurable ability ` to send dynamic metadata to the `ext_authz` service. -* ext_authz: added :ref:`filter_enabled RuntimeFractionalPercent flag ` to filter. +* dns: added support for configuring :ref:`dns_failure_refresh_rate ` to set the DNS refresh rate during failures. +* ext_authz: added :ref:`configurable ability ` to send dynamic metadata to the `ext_authz` service. +* ext_authz: added :ref:`filter_enabled RuntimeFractionalPercent flag ` to filter. * ext_authz: added tracing to the HTTP client. -* ext_authz: deprecated :ref:`cluster scope stats ` in favour of filter scope stats. -* fault: added overrides for default runtime keys in :ref:`HTTPFault ` filter. -* grpc: added :ref:`AWS IAM grpc credentials extension ` for AWS-managed xDS. -* grpc: added :ref:`gRPC stats filter ` for collecting stats about gRPC calls and streaming message counts. -* grpc-json: added support for :ref:`ignoring unknown query parameters `. -* grpc-json: added support for :ref:`the grpc-status-details-bin header `. -* header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. -* http: added a default one hour idle timeout to upstream and downstream connections. HTTP connections with no streams and no activity will be closed after one hour unless the default idle_timeout is overridden. 
To disable upstream idle timeouts, set the :ref:`idle_timeout ` to zero in Cluster :ref:`http_protocol_options `. To disable downstream idle timeouts, either set :ref:`idle_timeout ` to zero in the HttpConnectionManager :ref:`common_http_protocol_options ` or set the deprecated :ref:`connection manager ` field to zero. -* http: added the ability to format HTTP/1.1 header keys using :ref:`header_key_format `. +* ext_authz: deprecated :ref:`cluster scope stats ` in favour of filter scope stats. +* fault: added overrides for default runtime keys in :ref:`HTTPFault ` filter. +* grpc: added :ref:`AWS IAM grpc credentials extension ` for AWS-managed xDS. +* grpc: added :ref:`gRPC stats filter ` for collecting stats about gRPC calls and streaming message counts. +* grpc-json: added support for :ref:`ignoring unknown query parameters `. +* grpc-json: added support for :ref:`the grpc-status-details-bin header `. +* header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. +* http: added a default one hour idle timeout to upstream and downstream connections. HTTP connections with no streams and no activity will be closed after one hour unless the default idle_timeout is overridden. To disable upstream idle timeouts, set the :ref:`idle_timeout ` to zero in Cluster :ref:`http_protocol_options `. To disable downstream idle timeouts, either set :ref:`idle_timeout ` to zero in the HttpConnectionManager :ref:`common_http_protocol_options ` or set the deprecated :ref:`connection manager ` field to zero. +* http: added the ability to format HTTP/1.1 header keys using :ref:`header_key_format `. * http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature ``envoy.reloadable_features.strict_header_validation``. * http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. 
Guarded by ``envoy.reloadable_features.trusted_forwarded_proto`` which defaults true. -* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation ` field. -* http: added the ability to :ref:`merge adjacent slashes ` in the path. -* http: :ref:`AUTO ` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client. +* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation ` field. +* http: added the ability to :ref:`merge adjacent slashes ` in the path. +* http: :ref:`AUTO ` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client. * http: remove h2c upgrade headers for HTTP/1 as h2c upgrades are currently not supported. -* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url ` to false. -* http: support :ref:`host rewrite ` in the dynamic forward proxy. -* http: support :ref:`disabling the filter per route ` in the grpc http1 reverse bridge filter. -* http: added the ability to :ref:`configure max connection duration ` for downstream connections. -* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. -* listeners: added :ref:`HTTP inspector listener filter `. -* listeners: added :ref:`connection balancer ` +* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url ` to false. +* http: support :ref:`host rewrite ` in the dynamic forward proxy. +* http: support :ref:`disabling the filter per route ` in the grpc http1 reverse bridge filter. +* http: added the ability to :ref:`configure max connection duration ` for downstream connections. 
+* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. +* listeners: added :ref:`HTTP inspector listener filter `. +* listeners: added :ref:`connection balancer ` configuration for TCP listeners. * listeners: listeners now close the listening socket as part of the draining stage as soon as workers stop accepting their connections. * lua: extended ``httpCall()`` and ``respond()`` APIs to accept headers with entry values that can be a string or table of strings. * lua: extended ``dynamicMetadata:set()`` to allow setting complex values. * metrics_service: added support for flushing histogram buckets. -* outlier_detector: added :ref:`support for the grpc-status response header ` by mapping it to HTTP status. Guarded by envoy.reloadable_features.outlier_detection_support_for_grpc_status which defaults to true. +* outlier_detector: added :ref:`support for the grpc-status response header ` by mapping it to HTTP status. Guarded by envoy.reloadable_features.outlier_detection_support_for_grpc_status which defaults to true. * performance: new buffer implementation enabled by default (to disable add "--use-libevent-buffers 1" to the command-line arguments when starting Envoy). * performance: stats symbol table implementation (disabled by default; to test it, add "--use-fake-symbol-table 0" to the command-line arguments when starting Envoy). -* rbac: added support for DNS SAN as :ref:`principal_name `. -* redis: added :ref:`enable_command_stats ` to enable :ref:`per command statistics ` for upstream clusters. -* redis: added :ref:`read_policy ` to allow reading from redis replicas for Redis Cluster deployments. +* rbac: added support for DNS SAN as :ref:`principal_name `. +* redis: added :ref:`enable_command_stats ` to enable :ref:`per command statistics ` for upstream clusters. 
+* redis: added :ref:`read_policy ` to allow reading from redis replicas for Redis Cluster deployments. * redis: fixed a bug where the redis health checker ignored the upstream auth password. * redis: enable_hashtaging is always enabled when the upstream uses open source Redis cluster protocol. -* regex: introduced new :ref:`RegexMatcher ` type that +* regex: introduced new :ref:`RegexMatcher ` type that provides a safe regex implementation for untrusted user input. This type is now used in all configuration that processes user provided input. See :ref:`deprecated configuration details - ` for more information. -* rbac: added conditions to the policy, see :ref:`condition `. -* router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. -* router: :ref:`scoped routing ` is supported. -* router: added new :ref:`retriable-headers ` retry policy. Retries can now be configured to trigger by arbitrary response header matching. + ` for more information. +* rbac: added conditions to the policy, see :ref:`condition `. +* router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. +* router: :ref:`scoped routing ` is supported. +* router: added new :ref:`retriable-headers ` retry policy. Retries can now be configured to trigger by arbitrary response header matching. * router: added ability for most specific header mutations to take precedence, see :ref:`route configuration's most specific - header mutations wins flag `. -* router: added :ref:`respect_expected_rq_timeout ` that instructs ingress Envoy to respect :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when deriving timeout for upstream cluster. -* router: added new :ref:`retriable request headers ` to route configuration, to allow limiting buffering for retries and shadowing. -* router: added new :ref:`retriable request headers ` to retry policies. 
Retries can now be configured to only trigger on request header match. + header mutations wins flag `. +* router: added :ref:`respect_expected_rq_timeout ` that instructs ingress Envoy to respect :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when deriving timeout for upstream cluster. +* router: added new :ref:`retriable request headers ` to route configuration, to allow limiting buffering for retries and shadowing. +* router: added new :ref:`retriable request headers ` to retry policies. Retries can now be configured to only trigger on request header match. * router: added the ability to match a route based on whether a TLS certificate has been - :ref:`presented ` by the + :ref:`presented ` by the downstream connection. * router check tool: added coverage reporting & enforcement. * router check tool: added comprehensive coverage reporting. @@ -93,68 +93,68 @@ Changes * router check tool: added coverage reporting for direct response routes. * runtime: allows for the ability to parse boolean values. * runtime: allows for the ability to parse integers as double values and vice-versa. -* sds: added :ref:`session_ticket_keys_sds_secret_config ` for loading TLS Session Ticket Encryption Keys using SDS API. +* sds: added :ref:`session_ticket_keys_sds_secret_config ` for loading TLS Session Ticket Encryption Keys using SDS API. * server: added a post initialization lifecycle event, in addition to the existing startup and shutdown events. -* server: added :ref:`per-handler listener stats ` and - :ref:`per-worker watchdog stats ` to help diagnosing event +* server: added :ref:`per-handler listener stats ` and + :ref:`per-worker watchdog stats ` to help diagnosing event loop imbalance and general performance issues. * stats: added unit support to histogram. * tcp_proxy: the default :ref:`idle_timeout - ` is now 1 hour. + ` is now 1 hour. * thrift_proxy: fixed crashing bug on invalid transport/protocol framing. 
* thrift_proxy: added support for stripping service name from method when using the multiplexed protocol. * tls: added verification of IP address SAN fields in certificates against configured SANs in the certificate validation context. * tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. certificate validation context. * tracing: added tags for gRPC response status and message. -* tracing: added :ref:`max_path_tag_length ` to support customizing the length of the request path included in the extracted `http.url `_ tag. -* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. -* upstream: added :ref:`transport_socket_matches `, support using different transport socket config when connecting to different upstream endpoints within a cluster. -* upstream: added network filter chains to upstream connections, see :ref:`filters `. -* upstream: added new :ref:`failure-percentage based outlier detection ` mode. +* tracing: added :ref:`max_path_tag_length ` to support customizing the length of the request path included in the extracted `http.url `_ tag. +* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. +* upstream: added :ref:`transport_socket_matches `, support using different transport socket config when connecting to different upstream endpoints within a cluster. +* upstream: added network filter chains to upstream connections, see :ref:`filters `. +* upstream: added new :ref:`failure-percentage based outlier detection ` mode. * upstream: uses p2c to select hosts for least-requests load balancers if all host weights are the same, even in cases where weights are not equal to 1. -* upstream: added :ref:`fail_traffic_on_panic ` to allow failing all requests to a cluster during panic state. 
+* upstream: added :ref:`fail_traffic_on_panic ` to allow failing all requests to a cluster during panic state. * zookeeper: parses responses and emits latency stats. Deprecated ---------- -* The ORIGINAL_DST_LB :ref:`load balancing policy ` is +* The ORIGINAL_DST_LB :ref:`load balancing policy ` is deprecated, use CLUSTER_PROVIDED policy instead when configuring an :ref:`original destination - cluster `. -* The `regex` field in :ref:`StringMatcher ` has been + cluster `. +* The `regex` field in :ref:`StringMatcher ` has been deprecated in favor of the ``safe_regex`` field. -* The `regex` field in :ref:`RouteMatch ` has been +* The `regex` field in :ref:`RouteMatch ` has been deprecated in favor of the ``safe_regex`` field. * The ``allow_origin`` and ``allow_origin_regex`` fields in :ref:`CorsPolicy - ` have been deprecated in favor of the + ` have been deprecated in favor of the ``allow_origin_string_match`` field. -* The ``pattern`` and ``method`` fields in :ref:`VirtualCluster ` +* The ``pattern`` and ``method`` fields in :ref:`VirtualCluster ` have been deprecated in favor of the ``headers`` field. -* The `regex_match` field in :ref:`HeaderMatcher ` has been +* The `regex_match` field in :ref:`HeaderMatcher ` has been deprecated in favor of the ``safe_regex_match`` field. * The ``value`` and ``regex`` fields in :ref:`QueryParameterMatcher - ` has been deprecated in favor of the ``string_match`` + ` has been deprecated in favor of the ``string_match`` and ``present_match`` fields. * The :option:`--allow-unknown-fields` command-line option, use :option:`--allow-unknown-static-fields` instead. * The use of HTTP_JSON_V1 :ref:`Zipkin collector endpoint version - ` or not explicitly + ` or not explicitly specifying it is deprecated, use HTTP_JSON or HTTP_PROTO instead. * The `operation_name` field in :ref:`HTTP connection manager - ` + ` has been deprecated in favor of the ``traffic_direction`` field in - :ref:`Listener `. 
The latter takes priority if + :ref:`Listener `. The latter takes priority if specified. -* The `tls_context` field in :ref:`Filter chain ` message - and :ref:`Cluster ` message have been deprecated in favor of +* The `tls_context` field in :ref:`Filter chain ` message + and :ref:`Cluster ` message have been deprecated in favor of ``transport_socket`` with name ``envoy.transport_sockets.tls``. The latter takes priority if specified. * The ``use_http2`` field in - :ref:`HTTP health checker ` has been deprecated in + :ref:`HTTP health checker ` has been deprecated in favor of the ``codec_client_type`` field. -* The use of :ref:`gRPC bridge filter ` for +* The use of :ref:`gRPC bridge filter ` for gRPC stats has been deprecated in favor of the dedicated :ref:`gRPC stats - filter ` + filter ` * Ext_authz filter stats ``ok``, ``error``, ``denied``, ``failure_mode_allowed`` in *cluster..ext_authz.* namespace is deprecated. Use *http..ext_authz.* namespace to access same counters instead. diff --git a/docs/root/version_history/v1.12.3.rst b/docs/root/version_history/v1.12.3.rst index 53b87280ad7b4..a5cc8b2241ac4 100644 --- a/docs/root/version_history/v1.12.3.rst +++ b/docs/root/version_history/v1.12.3.rst @@ -6,6 +6,6 @@ Changes * buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. * http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature ``envoy.reloadable_features.http1_flood_protection``. -* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. -* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. +* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. 
* sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. diff --git a/docs/root/version_history/v1.12.4.rst b/docs/root/version_history/v1.12.4.rst index 7b606d34dbce3..c40c72182b59d 100644 --- a/docs/root/version_history/v1.12.4.rst +++ b/docs/root/version_history/v1.12.4.rst @@ -4,5 +4,5 @@ Changes ------- -* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. diff --git a/docs/root/version_history/v1.12.5.rst b/docs/root/version_history/v1.12.5.rst index dcca35f09aef3..4ceffcde602f5 100644 --- a/docs/root/version_history/v1.12.5.rst +++ b/docs/root/version_history/v1.12.5.rst @@ -4,8 +4,8 @@ Changes ------- * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. 
-* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.13.0.rst b/docs/root/version_history/v1.13.0.rst index b297835547035..fe9364b5573ff 100644 --- a/docs/root/version_history/v1.13.0.rst +++ b/docs/root/version_history/v1.13.0.rst @@ -4,82 +4,82 @@ Changes ------- -* access log: added FILTER_STATE :ref:`access log formatters ` and gRPC access logger. -* admin: added the ability to filter :ref:`/config_dump `. -* access log: added a :ref:`typed JSON logging mode ` to output access logs in JSON format with non-string values -* access log: fixed UPSTREAM_LOCAL_ADDRESS :ref:`access log formatters ` to work for http requests +* access log: added FILTER_STATE :ref:`access log formatters ` and gRPC access logger. +* admin: added the ability to filter :ref:`/config_dump `. +* access log: added a :ref:`typed JSON logging mode ` to output access logs in JSON format with non-string values +* access log: fixed UPSTREAM_LOCAL_ADDRESS :ref:`access log formatters ` to work for http requests * access log: added HOSTNAME. * api: remove all support for v1 -* api: added ability to specify `mode` for :ref:`Pipe `. +* api: added ability to specify `mode` for :ref:`Pipe `. * api: support for the v3 xDS API added. See :ref:`api_supported_versions`. * aws_request_signing: added new alpha HTTP AWS request signing filter * buffer: remove old implementation * build: official released binary is now built against libc++. -* cluster: added :ref:`aggregate cluster ` that allows load balancing between clusters. 
+* cluster: added :ref:`aggregate cluster ` that allows load balancing between clusters. * config: all category names of internal envoy extensions are prefixed with the 'envoy.' prefix to follow the reverse DNS naming notation. * decompressor: remove decompressor hard assert failure and replace with an error flag. -* ext_authz: added :ref:`configurable ability ` to send the :ref:`certificate ` to the `ext_authz` service. +* ext_authz: added :ref:`configurable ability ` to send the :ref:`certificate ` to the `ext_authz` service. * fault: fixed an issue where the http fault filter would repeatedly check the percentage of abort/delay when the ``x-envoy-downstream-service-cluster`` header was included in the request to ensure that the actual percentage of abort/delay matches the configuration of the filter. * health check: gRPC health checker sets the gRPC deadline to the configured timeout duration. -* health check: added :ref:`TlsOptions ` to allow TLS configuration overrides. -* health check: added :ref:`service_name_matcher ` to better compare the service name patterns for health check identity. +* health check: added :ref:`TlsOptions ` to allow TLS configuration overrides. +* health check: added :ref:`service_name_matcher ` to better compare the service name patterns for health check identity. * http: added strict validation that CONNECT is refused as it is not yet implemented. This can be reversed temporarily by setting the runtime feature ``envoy.reloadable_features.strict_method_validation`` to false. -* http: added support for http1 trailers. To enable use :ref:`enable_trailers `. +* http: added support for http1 trailers. To enable use :ref:`enable_trailers `. * http: added the ability to sanitize headers nominated by the Connection header. This new behavior is guarded by ``envoy.reloadable_features.connection_header_sanitization`` which defaults to true. * http: blocks unsupported transfer-encodings. 
Can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.reject_unsupported_transfer_encodings`` to false. -* http: support :ref:`auto_host_rewrite_header ` in the dynamic forward proxy. -* jwt_authn: added :ref:`allow_missing ` option that accepts request without token but rejects bad request with bad tokens. -* jwt_authn: added :ref:`bypass_cors_preflight ` to allow bypassing the CORS preflight request. -* lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET ` -* listeners: added :ref:`reuse_port ` option. -* logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. -* ratelimit: added :ref:`local rate limit ` network filter. -* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name `. +* http: support :ref:`auto_host_rewrite_header ` in the dynamic forward proxy. +* jwt_authn: added :ref:`allow_missing ` option that accepts request without token but rejects bad request with bad tokens. +* jwt_authn: added :ref:`bypass_cors_preflight ` to allow bypassing the CORS preflight request. +* lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET ` +* listeners: added :ref:`reuse_port ` option. +* logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. +* ratelimit: added :ref:`local rate limit ` network filter. +* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name `. * redis: performance improvement for larger split commands by avoiding string copies. * redis: correctly follow MOVE/ASK redirection for mirrored clusters. -* redis: add :ref:`host_degraded_refresh_threshold ` and :ref:`failure_refresh_threshold ` to refresh topology when nodes are degraded or when requests fails. -* router: added histograms to show timeout budget usage to the :ref:`cluster stats `. 
+* redis: add :ref:`host_degraded_refresh_threshold ` and :ref:`failure_refresh_threshold ` to refresh topology when nodes are degraded or when requests fails. +* router: added histograms to show timeout budget usage to the :ref:`cluster stats `. * router check tool: added support for testing and marking coverage for routes of runtime fraction 0. -* router: added :ref:`request_mirror_policies ` to support sending multiple mirrored requests in one route. -* router: added support for REQ(header-name) :ref:`header formatter `. -* router: added support for percentage-based :ref:`retry budgets ` -* router: allow using a :ref:`query parameter ` for HTTP consistent hashing. +* router: added :ref:`request_mirror_policies ` to support sending multiple mirrored requests in one route. +* router: added support for REQ(header-name) :ref:`header formatter `. +* router: added support for percentage-based :ref:`retry budgets ` +* router: allow using a :ref:`query parameter ` for HTTP consistent hashing. * router: exposed DOWNSTREAM_REMOTE_ADDRESS as custom HTTP request/response headers. -* router: added support for :ref:`max_internal_redirects ` for configurable maximum internal redirect hops. +* router: added support for :ref:`max_internal_redirects ` for configurable maximum internal redirect hops. * router: skip the Location header when the response code is not a 201 or a 3xx. -* router: added :ref:`auto_sni ` to support setting SNI to transport socket for new upstream connections based on the downstream HTTP host/authority header. +* router: added :ref:`auto_sni ` to support setting SNI to transport socket for new upstream connections based on the downstream HTTP host/authority header. * router: added support for HOSTNAME :ref:`header formatter - `. + `. * server: added the :option:`--disable-extensions` CLI option, to disable extensions at startup. * server: fixed a bug in config validation for configs with runtime layers. 
-* server: added :ref:`workers_started ` that indicates whether listeners have been fully initialized on workers. -* tcp_proxy: added :ref:`ClusterWeight.metadata_match `. -* tcp_proxy: added :ref:`hash_policy `. +* server: added :ref:`workers_started ` that indicates whether listeners have been fully initialized on workers. +* tcp_proxy: added :ref:`ClusterWeight.metadata_match `. +* tcp_proxy: added :ref:`hash_policy `. * thrift_proxy: added support for cluster header based routing. * thrift_proxy: added stats to the router filter. * tls: remove TLS 1.0 and 1.1 from client defaults -* tls: added support for :ref:`generic string matcher ` for subject alternative names. -* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager ` and the :ref:`HTTP route `. +* tls: added support for :ref:`generic string matcher ` for subject alternative names. +* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager ` and the :ref:`HTTP route `. * tracing: added upstream_address tag. -* tracing: added initial support for AWS X-Ray (local sampling rules only) :ref:`X-Ray Tracing `. +* tracing: added initial support for AWS X-Ray (local sampling rules only) :ref:`X-Ray Tracing `. * tracing: added tags for gRPC request path, authority, content-type and timeout. -* udp: added initial support for :ref:`UDP proxy ` +* udp: added initial support for :ref:`UDP proxy ` Deprecated ---------- * The `request_headers_for_tags` field in :ref:`HTTP connection manager - ` + ` has been deprecated in favor of the :ref:`custom_tags - ` field. + ` field. * The `verify_subject_alt_name` field in :ref:`Certificate Validation Context - ` + ` has been deprecated in favor of the :ref:`match_subject_alt_names - ` field. -* The ``request_mirror_policy`` field in :ref:`RouteMatch ` has been deprecated in + ` field. +* The ``request_mirror_policy`` field in :ref:`RouteMatch ` has been deprecated in favor of the ``request_mirror_policies`` field. 
* The ``service_name`` field in - :ref:`HTTP health checker ` has been deprecated in + :ref:`HTTP health checker ` has been deprecated in favor of the ``service_name_matcher`` field. * The v2 xDS API is deprecated. It will be supported by Envoy until EOY 2020. See :ref:`api_supported_versions`. diff --git a/docs/root/version_history/v1.13.1.rst b/docs/root/version_history/v1.13.1.rst index 46d05ebc9d5c9..1b7b97a20018f 100644 --- a/docs/root/version_history/v1.13.1.rst +++ b/docs/root/version_history/v1.13.1.rst @@ -6,6 +6,6 @@ Changes * buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. * http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature ``envoy.reloadable_features.http1_flood_protection``. -* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. -* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. +* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. * sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. diff --git a/docs/root/version_history/v1.13.2.rst b/docs/root/version_history/v1.13.2.rst index 5ef942997b7c2..fb8703191b376 100644 --- a/docs/root/version_history/v1.13.2.rst +++ b/docs/root/version_history/v1.13.2.rst @@ -4,5 +4,5 @@ Changes ------- -* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. 
+* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. diff --git a/docs/root/version_history/v1.13.3.rst b/docs/root/version_history/v1.13.3.rst index 8cdbfe128c931..a83da6a749a66 100644 --- a/docs/root/version_history/v1.13.3.rst +++ b/docs/root/version_history/v1.13.3.rst @@ -5,8 +5,8 @@ Changes ------- * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. 
diff --git a/docs/root/version_history/v1.14.0.rst b/docs/root/version_history/v1.14.0.rst index b677dac0f96da..302b9dc44e8fb 100644 --- a/docs/root/version_history/v1.14.0.rst +++ b/docs/root/version_history/v1.14.0.rst @@ -5,85 +5,85 @@ Changes ------- * access log: access logger extensions use the "envoy.access_loggers" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* access log: added support for ``%DOWNSTREAM_LOCAL_PORT%`` :ref:`access log formatters `. + of extension names is available in the :ref:`deprecated ` documentation. +* access log: added support for ``%DOWNSTREAM_LOCAL_PORT%`` :ref:`access log formatters `. * access log: fixed ``%DOWSTREAM_DIRECT_REMOTE_ADDRESS%`` when used with PROXY protocol listener filter. -* access log: introduced :ref:`connection-level access loggers `. +* access log: introduced :ref:`connection-level access loggers `. * adaptive concurrency: fixed bug that allowed concurrency limits to drop below the configured minimum. * adaptive concurrency: minRTT is now triggered when the minimum concurrency is maintained for 5 consecutive sampling intervals. -* admin: added support for displaying ip address subject alternate names in :ref:`certs ` end point. +* admin: added support for displaying ip address subject alternate names in :ref:`certs ` end point. * admin: added :http:post:`/reopen_logs` endpoint to control log rotation. * api: froze v2 xDS API. New feature development in the API should occur in v3 xDS. While the v2 xDS API has been deprecated since 1.13.0, it will continue to be supported by Envoy until EOY 2020. See :ref:`api_supported_versions`. -* aws_lambda: added :ref:`AWS Lambda filter ` that converts HTTP requests to Lambda +* aws_lambda: added :ref:`AWS Lambda filter ` that converts HTTP requests to Lambda invokes. This effectively makes Envoy act as an egress gateway to AWS Lambda. * aws_request_signing: a few fixes so that it works with S3. 
-* config: added stat :ref:`update_time `. -* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. +* config: added stat :ref:`update_time `. +* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. * datasource: added retry policy for remote async data source. -* dns: added support for :ref:`dns_failure_refresh_rate ` for the :ref:`dns cache ` to set the DNS refresh rate during failures. +* dns: added support for :ref:`dns_failure_refresh_rate ` for the :ref:`dns cache ` to set the DNS refresh rate during failures. * dns: the STRICT_DNS cluster now only resolves to 0 hosts if DNS resolution successfully returns 0 hosts. -* eds: added :ref:`hostname ` field for endpoints and :ref:`hostname ` field for endpoint's health check config. This enables auto host rewrite and customizing the host header during health checks for eds endpoints. +* eds: added :ref:`hostname ` field for endpoints and :ref:`hostname ` field for endpoint's health check config. This enables auto host rewrite and customizing the host header during health checks for eds endpoints. * ext_authz: disabled the use of lowercase string matcher for headers matching in HTTP-based ``ext_authz``. Can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher`` to false. -* fault: added support for controlling abort faults with :ref:`HTTP header fault configuration ` to the HTTP fault filter. +* fault: added support for controlling abort faults with :ref:`HTTP header fault configuration ` to the HTTP fault filter. * grpc-json: added support for building HTTP request into `google.api.HttpBody `_. * grpc-stats: added option to limit which messages stats are created for. 
* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature ``envoy.reloadable_features.http1_flood_protection``. -* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. -* http: added :ref:`max_stream_duration ` to specify the duration of existing streams. See :ref:`connection and stream timeouts `. +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: added :ref:`max_stream_duration ` to specify the duration of existing streams. See :ref:`connection and stream timeouts `. * http: connection header sanitizing has been modified to always sanitize if there is no upgrade, including when an h2c upgrade attempt has been removed. * http: fixed a bug that could send extra METADATA frames and underflow memory when encoding METADATA frames on a connection that was dispatching data. * http: fixing a bug in HTTP/1.0 responses where Connection: keep-alive was not appended for connections which were kept alive. * http: http filter extensions use the "envoy.filters.http" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. + of extension names is available in the :ref:`deprecated ` documentation. * http: the runtime feature ``http.connection_manager.log_flood_exception`` is removed and replaced with a connection access log response code. * http: upgrade parser library, which removes support for "identity" transfer-encoding value. * listener filters: listener filter extensions use the "envoy.filters.listener" name space. 
A - mapping of extension names is available in the :ref:`deprecated ` documentation. -* listeners: added :ref:`listener filter matcher api ` to disable individual listener filter on matching downstream connections. -* loadbalancing: added support for using hostname for consistent hash loadbalancing via :ref:`consistent_hash_lb_config `. -* loadbalancing: added support for :ref:`retry host predicates ` in conjunction with consistent hashing load balancers (ring hash and maglev). + mapping of extension names is available in the :ref:`deprecated ` documentation. +* listeners: added :ref:`listener filter matcher api ` to disable individual listener filter on matching downstream connections. +* loadbalancing: added support for using hostname for consistent hash loadbalancing via :ref:`consistent_hash_lb_config `. +* loadbalancing: added support for :ref:`retry host predicates ` in conjunction with consistent hashing load balancers (ring hash and maglev). * lua: added a parameter to ``httpCall`` that makes it possible to have the call be asynchronous. * lua: added moonjit support. -* mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter ` was modified to emit correctly for Mongo v3.2+. -* network filters: added a :ref:`direct response filter `. +* mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter ` was modified to emit correctly for Mongo v3.2+. +* network filters: added a :ref:`direct response filter `. * network filters: network filter extensions use the "envoy.filters.network" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* rbac: added :ref:`remote_ip ` and :ref:`direct_remote_ip ` for matching downstream remote IP address. -* rbac: deprecated :ref:`source_ip ` with :ref:`direct_remote_ip ` and :ref:`remote_ip `. -* request_id_extension: added an ability to extend request ID handling at :ref:`HTTP connection manager `. 
-* retry: added a retry predicate that :ref:`rejects hosts based on metadata. `. + of extension names is available in the :ref:`deprecated ` documentation. +* rbac: added :ref:`remote_ip ` and :ref:`direct_remote_ip ` for matching downstream remote IP address. +* rbac: deprecated :ref:`source_ip ` with :ref:`direct_remote_ip ` and :ref:`remote_ip `. +* request_id_extension: added an ability to extend request ID handling at :ref:`HTTP connection manager `. +* retry: added a retry predicate that :ref:`rejects hosts based on metadata. `. * router: added ability to set attempt count in downstream response, see :ref:`virtual host's include response - attempt count config `. -* router: added additional stats for :ref:`virtual clusters `. -* router: added :ref:`auto_san_validation ` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header. + attempt count config `. +* router: added additional stats for :ref:`virtual clusters `. +* router: added :ref:`auto_san_validation ` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header. * router: added the ability to match a route based on whether a downstream TLS connection certificate has been - :ref:`validated `. + :ref:`validated `. * router: added support for :ref:`regex_rewrite - ` for path rewriting using regular expressions and capture groups. -* router: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`header formatter `. -* router: don't ignore :ref:`per_try_timeout ` when the :ref:`global route timeout ` is disabled. -* router: strip whitespace for :ref:`retry_on `, :ref:`grpc-retry-on header ` and :ref:`retry-on header `. + ` for path rewriting using regular expressions and capture groups. +* router: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`header formatter `. 
+* router: don't ignore :ref:`per_try_timeout ` when the :ref:`global route timeout ` is disabled. +* router: strip whitespace for :ref:`retry_on `, :ref:`grpc-retry-on header ` and :ref:`retry-on header `. * runtime: enabling the runtime feature ``envoy.deprecated_features.allow_deprecated_extension_names`` disables the use of deprecated extension names. * runtime: integer values may now be parsed as booleans. -* sds: added :ref:`GenericSecret ` to support secret of generic type. -* sds: added :ref:`certificate rotation ` support for certificates in static resources. +* sds: added :ref:`GenericSecret ` to support secret of generic type. +* sds: added :ref:`certificate rotation ` support for certificates in static resources. * server: the SIGUSR1 access log reopen warning now is logged at info level. * stat sinks: stat sink extensions use the "envoy.stat_sinks" name space. A mapping of extension - names is available in the :ref:`deprecated ` documentation. + names is available in the :ref:`deprecated ` documentation. * thrift_proxy: added router filter stats to docs. -* tls: added configuration to disable stateless TLS session resumption :ref:`disable_stateless_session_resumption `. +* tls: added configuration to disable stateless TLS session resumption :ref:`disable_stateless_session_resumption `. * tracing: added gRPC service configuration to the OpenCensus Stackdriver and OpenCensus Agent tracers. * tracing: tracer extensions use the "envoy.tracers" name space. A mapping of extension names is - available in the :ref:`deprecated ` documentation. -* upstream: added ``upstream_rq_retry_limit_exceeded`` to :ref:`cluster `, and :ref:`virtual cluster ` stats. -* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode `. + available in the :ref:`deprecated ` documentation. +* upstream: added ``upstream_rq_retry_limit_exceeded`` to :ref:`cluster `, and :ref:`virtual cluster ` stats. 
+* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode `. * upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker limits for both requests and connections apply to both pool types. Also, HTTP/2 now has the option to limit concurrent requests on a connection, and allow multiple draining @@ -91,7 +91,7 @@ Changes period by disabling runtime feature ``envoy.reloadable_features.new_http1_connection_pool_behavior`` or ``envoy.reloadable_features.new_http2_connection_pool_behavior`` and then re-configure your clusters or restart Envoy. The behavior will not switch until the connection pools are recreated. The new - circuit breaker behavior is described :ref:`here `. + circuit breaker behavior is described :ref:`here `. * zlib: by default zlib is initialized to use its default strategy (Z_DEFAULT_STRATEGY) instead of the fixed one (Z_FIXED). The difference is that the use of dynamic Huffman codes is enabled now resulting in better compression ratio for normal data. @@ -101,7 +101,7 @@ Deprecated * The previous behavior for upstream connection pool circuit breaking described `here `_ has - been deprecated in favor of the new behavior described :ref:`here `. + been deprecated in favor of the new behavior described :ref:`here `. * Access Logger, Listener Filter, HTTP Filter, Network Filter, Stats Sink, and Tracer names have been deprecated in favor of the extension name from the envoy build system. 
Disable the runtime feature "envoy.deprecated_features.allow_deprecated_extension_names" to disallow the deprecated @@ -166,27 +166,27 @@ Deprecated * Tracers * The previous behavior of auto ignoring case in headers matching: - :ref:`allowed_headers `, - :ref:`allowed_upstream_headers `, - and :ref:`allowed_client_headers ` + :ref:`allowed_headers `, + :ref:`allowed_upstream_headers `, + and :ref:`allowed_client_headers ` of HTTP-based ``ext_authz`` has been deprecated in favor of explicitly setting the - :ref:`ignore_case ` field. + :ref:`ignore_case ` field. * The ``header_fields``, ``custom_header_fields``, and ``additional_headers`` fields for the route checker tool have been deprecated in favor of ``request_header_fields``, ``response_header_fields``, ``additional_request_headers``, and ``additional_response_headers``. * The ``content_length``, ``content_type``, ``disable_on_etag_header`` and ``remove_accept_encoding_header`` - fields in :ref:`HTTP Gzip filter config ` have + fields in :ref:`HTTP Gzip filter config ` have been deprecated in favor of ``compressor``. -* The statistics counter ``header_gzip`` in :ref:`HTTP Gzip filter ` +* The statistics counter ``header_gzip`` in :ref:`HTTP Gzip filter ` has been deprecated in favor of ``header_compressor_used``. * Support for the undocumented HTTP/1.1 ``:no-chunks`` pseudo-header has been removed. If an extension was using this it can achieve the same behavior via the new ``http1StreamEncoderOptions()`` API. * The grpc_stats filter behavior of by default creating a new stat for every message type seen is deprecated. The default will switch to only creating a fixed set of stats. The previous behavior can be enabled by enabling - :ref:`stats_for_all_methods `, + :ref:`stats_for_all_methods `, and the previous default can be enabled until the end of the deprecation period by enabling runtime feature ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default``. 
-* The :ref:`source_ip ` field in +* The :ref:`source_ip ` field in `RBAC `_ has been deprecated - in favor of :ref:`direct_remote_ip ` and - :ref:`remote_ip `. + in favor of :ref:`direct_remote_ip ` and + :ref:`remote_ip `. diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst index c7d4731d865b4..a9867a9afe0c3 100644 --- a/docs/root/version_history/v1.14.2.rst +++ b/docs/root/version_history/v1.14.2.rst @@ -5,10 +5,10 @@ Changes ------- * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. -* http: the :ref:`stream_idle_timeout ` +* http: the :ref:`stream_idle_timeout ` now also defends against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. -* listener: Add runtime support for :ref:`per-listener limits ` on +* listener: Add runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: Add runtime support for :ref:`global limits ` +* overload management: Add runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.3.rst b/docs/root/version_history/v1.14.3.rst index 523a4fc9a607e..66526566ea0af 100644 --- a/docs/root/version_history/v1.14.3.rst +++ b/docs/root/version_history/v1.14.3.rst @@ -4,8 +4,8 @@ Changes ------- * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. 
-* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.7.rst b/docs/root/version_history/v1.14.7.rst index 041b5da018f82..d476b23bd2e07 100644 --- a/docs/root/version_history/v1.14.7.rst +++ b/docs/root/version_history/v1.14.7.rst @@ -2,7 +2,7 @@ ======================= Changes ------- -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: fixed bugs in datadog and squash filter's handling of responses with no bodies. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. diff --git a/docs/root/version_history/v1.15.0.rst b/docs/root/version_history/v1.15.0.rst index d565e35bb5cce..a730b9d283915 100644 --- a/docs/root/version_history/v1.15.0.rst +++ b/docs/root/version_history/v1.15.0.rst @@ -8,7 +8,7 @@ Incompatible Behavior Changes * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * client_ssl_auth: the ``auth_ip_white_list`` stat has been renamed to - :ref:`auth_ip_allowlist `. + :ref:`auth_ip_allowlist `. 
* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). * router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature ``envoy.reloadable_features.preserve_query_string_in_path_redirects`` to false. * tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature ``envoy.reloadable_features.fix_wildcard_matching`` to false. @@ -17,9 +17,9 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.disallow_unbounded_access_logs`` to false. +* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.disallow_unbounded_access_logs`` to false. * build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable ``ENVOY_UID`` to ``0``. ``ENVOY_UID`` and ``ENVOY_GID`` can be used to set the envoy user's ``uid`` and ``gid`` respectively. -* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. +* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). 
* http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.early_errors_via_hcm`` to false. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.fix_connection_close`` to false. @@ -31,7 +31,7 @@ Minor Behavior Changes * http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.http_default_alpn`` to false. * listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. * router: extended to allow retries of streaming or incomplete requests. This removes stat ``rq_retry_skipped_request_not_complete``. -* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. +* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. Bug Fixes --------- @@ -43,22 +43,22 @@ Bug Fixes * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. * grpc-json: fixed a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. * http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. -* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes ` is enabled. 
-* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes ` is enabled. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.fix_connection_close`` to false. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. * prometheus stats: fixed the sort order of output lines to comply with the standard. -* udp: the :ref:`reuse_port ` listener option must now be +* udp: the :ref:`reuse_port ` listener option must now be specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a bug fix. * upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. 
Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * http: removed legacy connection pool code and their runtime features: ``envoy.reloadable_features.new_http1_connection_pool_behavior`` and ``envoy.reloadable_features.new_http2_connection_pool_behavior``. @@ -66,101 +66,101 @@ Removed Config or Runtime New Features ------------ -* access loggers: added file access logger config :ref:`log_format `. +* access loggers: added file access logger config :ref:`log_format `. * access loggers: added GRPC_STATUS operator on logging format. -* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. -* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. -* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. +* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. +* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * build: official released binary is now built with Clang 10.0.0. -* cluster: added an extension point for configurable :ref:`upstreams `. -* compressor: exposed generic :ref:`compressor ` filter to users. -* config: added :ref:`identifier ` stat that reflects control plane identifier. 
-* config: added :ref:`version_text ` stat that reflects xDS version. -* decompressor: exposed generic :ref:`decompressor ` filter to users. -* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. -* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. +* cluster: added an extension point for configurable :ref:`upstreams `. +* compressor: exposed generic :ref:`compressor ` filter to users. +* config: added :ref:`identifier ` stat that reflects control plane identifier. +* config: added :ref:`version_text ` stat that reflects xDS version. +* decompressor: exposed generic :ref:`decompressor ` filter to users. +* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. This behavior can be temporarily disabled by the runtime feature ``envoy.reloadable_features.enable_dns_cache_circuit_breakers``. - If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. -* dynamic forward proxy: added :ref:`allow_insecure_cluster_options ` to allow disabling of auto_san_validation and auto_sni. -* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. -* ext_authz filter: added API version field for both :ref:`HTTP ` - and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. -* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. 
+ If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. +* dynamic forward proxy: added :ref:`allow_insecure_cluster_options ` to allow disabling of auto_san_validation and auto_sni. +* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. +* ext_authz filter: added API version field for both :ref:`HTTP ` + and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. +* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults - are applied to using :ref:`HTTP headers ` to the HTTP fault filter. + are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using - :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. + :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. * filter: added ``upstream_rq_time`` stats to the GPRC stats filter. - Disabled by default and can be enabled via :ref:`enable_upstream_stats `. -* grpc: added support for Google gRPC :ref:`custom channel arguments `. + Disabled by default and can be enabled via :ref:`enable_upstream_stats `. +* grpc: added support for Google gRPC :ref:`custom channel arguments `. * grpc-json: added support for streaming response using `google.api.HttpBody `_. * grpc-json: send a ``x-envoy-original-method`` header to grpc services. * gzip filter: added option to set zlib's next output buffer size. 
* hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. * header to metadata: added support for regex substitutions on header values. -* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. -* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. -* http: added :ref:`stripping port from host header ` support. -* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation ` for details. +* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. +* http: added :ref:`stripping port from host header ` support. +* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation ` for details. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature ``envoy.reloadable_features.listener_in_place_filterchain_update`` to false. - Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. + Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. * logger: added ``--log-format-prefix-with-location`` command line option to prefix '%v' with file path and line number. 
* lrs: added new ``envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters`` field in LRS response, which allows management servers to avoid explicitly listing all clusters it is interested in; behavior is allowed based on new ``envoy.lrs.supports_send_all_clusters`` capability - in :ref:`client_features ` field. + in :ref:`client_features ` field. * lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. -* lua: added :ref:`per route config ` for Lua filter. +* lua: added :ref:`per route config ` for Lua filter. * lua: added tracing to the ``httpCall()`` API. -* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* network filters: added a :ref:`postgres proxy filter `. -* network filters: added a :ref:`rocketmq proxy filter `. +* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* network filters: added a :ref:`postgres proxy filter `. +* network filters: added a :ref:`rocketmq proxy filter `. * performance: enabled stats symbol table implementation by default. To disable it, add ``--use-fake-symbol-table 1`` to the command-line arguments when starting Envoy. -* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. -* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. -* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. -* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. 
-* request_id: added to :ref:`always_set_request_id_in_response setting ` - to set :ref:`x-request-id ` header in response even if +* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. +* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. +* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. +* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. +* request_id: added to :ref:`always_set_request_id_in_response setting ` + to set :ref:`x-request-id ` header in response even if tracing is not forced. * router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy - ` field. + ` field. * router: added regex substitution support for header based hashing. * router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters - `. -* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. -* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. + `. +* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. +* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). 
-* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. -* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. +* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. +* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: made tracing configuration fully dynamic and every HTTP connection manager - can now have a separate :ref:`tracing provider `. -* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. + can now have a separate :ref:`tracing provider `. +* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. Deprecated ---------- -* Tracing provider configuration as part of :ref:`bootstrap config ` +* Tracing provider configuration as part of :ref:`bootstrap config ` has been deprecated in favor of configuration as part of :ref:`HTTP connection manager - `. -* The :ref:`HTTP Gzip filter ` has been deprecated in favor of - :ref:`Compressor `. -* The * :ref:`GoogleRE2.max_program_size ` + `. +* The :ref:`HTTP Gzip filter ` has been deprecated in favor of + :ref:`Compressor `. +* The * :ref:`GoogleRE2.max_program_size ` field is now deprecated. Management servers are expected to validate regexp program sizes instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime. -* The :ref:`internal_redirect_action ` - field and :ref:`max_internal_redirects ` field +* The :ref:`internal_redirect_action ` + field and :ref:`max_internal_redirects ` field are now deprecated. This changes the implemented default cross scheme redirect behavior. All cross scheme redirects are disallowed by default. To restore the previous behavior, set allow_cross_scheme_redirect=true and use - :ref:`safe_cross_scheme `, - in :ref:`predicates `. 
-* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. + :ref:`safe_cross_scheme `, + in :ref:`predicates `. +* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. * A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting ``envoy.reloadable_features.enable_deprecated_v2_api_warning`` to ``false``. -* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting ``envoy.reloadable_features.enable_dns_cache_circuit_breakers`` to ``false``. +* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting ``envoy.reloadable_features.enable_dns_cache_circuit_breakers`` to ``false``. diff --git a/docs/root/version_history/v1.15.4.rst b/docs/root/version_history/v1.15.4.rst index f40b70a69cfd0..10d03f007f27a 100644 --- a/docs/root/version_history/v1.15.4.rst +++ b/docs/root/version_history/v1.15.4.rst @@ -4,7 +4,7 @@ Changes ------- -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. * http: fixed bugs in datadog and squash filter's handling of responses with no bodies. 
@@ -13,7 +13,7 @@ Changes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.15.5.rst b/docs/root/version_history/v1.15.5.rst index ca0fcbab5d56f..7d4fbe370c323 100644 --- a/docs/root/version_history/v1.15.5.rst +++ b/docs/root/version_history/v1.15.5.rst @@ -6,11 +6,11 @@ Changes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. 
Deprecated ---------- diff --git a/docs/root/version_history/v1.16.0.rst b/docs/root/version_history/v1.16.0.rst index 32cb2a70dd236..51c332b3bc99d 100644 --- a/docs/root/version_history/v1.16.0.rst +++ b/docs/root/version_history/v1.16.0.rst @@ -5,10 +5,10 @@ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in :repo:`BUILD `. +* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in :repo:`BUILD `. * build: tcmalloc changes require Clang 9. This requirement change can be avoided by building with ``--define tcmalloc=gperftools`` to use the older tcmalloc code. * config: additional warnings have been added for the use of v2 APIs. These appear as log messages - and are also captured in the :ref:`deprecated_feature_use ` counter after server + and are also captured in the :ref:`deprecated_feature_use ` counter after server initialization. * dns: ``envoy.restart_features.use_apple_api_for_dns_lookups`` is on by default. This flag only affects Apple platforms (macOS, iOS). It is incompatible to have the runtime flag set to true at the same time as specifying the ````use_tcp_for_dns_lookups```` option or custom dns resolvers. Doing so will cause failure. * watchdog: added two guarddogs, breaking the aggregated stats for the single guarddog system. The aggregated stats for the guarddogs will have the following prefixes: ``main_thread`` and ``workers``. Concretely, anything monitoring ``server.watchdog_miss`` and ``server.watchdog_mega_miss`` will need to be updated. @@ -19,20 +19,20 @@ Minor Behavior Changes * adaptive concurrency: added a response body / grpc-message header for rejected requests. * async_client: minor change to handling header only responses more similar to header-with-empty-body responses. 
-* build: an :ref:`Ubuntu based debug image ` is built and published in DockerHub. +* build: an :ref:`Ubuntu based debug image ` is built and published in DockerHub. * build: the debug information will be generated separately to reduce target size and reduce compilation time when build in compilation mode ``dbg`` and ``opt``. Users will need to build dwp file to debug with gdb. * compressor: always insert ``Vary`` headers for compressible resources even if it's decided not to compress a response due to incompatible ``Accept-Encoding`` value. The ``Vary`` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. * decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. This is now fixed. * ext_authz filter: request timeout will now count from the time the check request is created, instead of when it becomes active. This makes sure that the timeout is enforced even if the ext_authz cluster's circuit breaker is engaged. - This behavior can be reverted by setting runtime feature ``envoy.reloadable_features.ext_authz_measure_timeout_on_check_created`` to false. When enabled, a new ``ext_authz.timeout`` stat is counted when timeout occurs. See :ref:`stats `. + This behavior can be reverted by setting runtime feature ``envoy.reloadable_features.ext_authz_measure_timeout_on_check_created`` to false. When enabled, a new ``ext_authz.timeout`` stat is counted when timeout occurs. See :ref:`stats `. * grpc reverse bridge: upstream headers will no longer be propagated when the response is missing or contains an unexpected content-type. -* http: added :ref:`contains `, a new string matcher type which matches if the value of the string has the substring mentioned in contains matcher. -* http: added :ref:`contains `, a new header matcher type which matches if the value of the header has the substring mentioned in contains matcher. 
-* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. -* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message ` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated). -* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. +* http: added :ref:`contains `, a new string matcher type which matches if the value of the string has the substring mentioned in contains matcher. +* http: added :ref:`contains `, a new header matcher type which matches if the value of the header has the substring mentioned in contains matcher. +* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. 
+* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message ` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated). +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. * http: applying route level header modifications to local replies sent on that route. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.always_apply_route_header_rules`` to false. -* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2`` to false. +* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2`` to false. 
* http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.allow_response_for_timeout`` to false. * http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. This behavior can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` to false. * http: changed how local replies are processed for requests which transform from grpc to not-grpc, or not-grpc to grpc. Previously the initial generated reply depended on which filter sent the reply, but now the reply is consistently generated the way the downstream expects. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.unify_grpc_handling`` to false. @@ -52,7 +52,7 @@ Minor Behavior Changes * router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature ``envoy.reloadable_features.consume_all_retry_headers`` to false. * stats: the fake symbol table implemention has been removed from the binary, and the option ``--use-fake-symbol-table`` is now a no-op with a warning. * thrift_proxy: special characters {'\0', '\r', '\n'} will be stripped from thrift headers. -* watchdog: replaced single watchdog with separate watchdog configuration for worker threads and for the main thread configured via :ref:`Watchdogs `. It works with :ref:`watchdog ` by having the worker thread and main thread watchdogs have same config. +* watchdog: replaced single watchdog with separate watchdog configuration for worker threads and for the main thread configured via :ref:`Watchdogs `. It works with :ref:`watchdog ` by having the worker thread and main thread watchdogs have same config. 
Bug Fixes --------- @@ -74,7 +74,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * http: removed legacy header sanitization and the runtime guard ``envoy.reloadable_features.strict_header_validation``. * http: removed legacy transfer-encoding enforcement and runtime guard ``envoy.reloadable_features.reject_unsupported_transfer_encodings``. @@ -83,101 +83,101 @@ Removed Config or Runtime New Features ------------ -* access log: added a :ref:`dynamic metadata filter ` for access logs, which filters whether to log based on matching dynamic metadata. -* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. -* access log: added support for :ref:`%CONNECTION_TERMINATION_DETAILS% ` as a log command operator about why the connection is terminated by Envoy. -* access log: added support for nested objects in :ref:`JSON logging mode `. -* access log: added :ref:`omit_empty_values ` option to omit unset value from formatted log. -* access log: added support for :ref:`%CONNECTION_ID% ` for the downstream connection identifier. -* admin: added :ref:`circuit breakers settings ` information to GET /clusters?format=json :ref:`cluster status `. -* admin: added :ref:`node ` information to GET /server_info :ref:`response object `. -* admin: added the ability to dump init manager unready targets information :ref:`/init_dump ` and :ref:`/init_dump?mask={} `. -* admission control: added the :ref:`admission control ` filter for client-side request throttling. -* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. -* cluster: added new :ref:`connection_pool_per_downstream_connection ` flag, which enable creation of a new connection pool for each downstream connection. 
+* access log: added a :ref:`dynamic metadata filter ` for access logs, which filters whether to log based on matching dynamic metadata. +* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. +* access log: added support for :ref:`%CONNECTION_TERMINATION_DETAILS% ` as a log command operator about why the connection is terminated by Envoy. +* access log: added support for nested objects in :ref:`JSON logging mode `. +* access log: added :ref:`omit_empty_values ` option to omit unset value from formatted log. +* access log: added support for :ref:`%CONNECTION_ID% ` for the downstream connection identifier. +* admin: added :ref:`circuit breakers settings ` information to GET /clusters?format=json :ref:`cluster status `. +* admin: added :ref:`node ` information to GET /server_info :ref:`response object `. +* admin: added the ability to dump init manager unready targets information :ref:`/init_dump ` and :ref:`/init_dump?mask={} `. +* admission control: added the :ref:`admission control ` filter for client-side request throttling. +* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. +* cluster: added new :ref:`connection_pool_per_downstream_connection ` flag, which enable creation of a new connection pool for each downstream connection. * decompressor filter: reports compressed and uncompressed bytes in trailers. * dns: added support for doing DNS resolution using Apple's DnsService APIs in Apple platforms (macOS, iOS). This feature is ON by default, and is only configurable via the ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime key. Note that this value is latched during server startup and changing the runtime key is a no-op during the lifetime of the process. -* dns_filter: added support for answering :ref:`service record ` queries. 
-* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups ` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters `. -* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. - The emitted dynamic metadata is set by :ref:`dynamic metadata ` field in a returned :ref:`CheckResponse `. -* ext_authz filter: added :ref:`stat_prefix ` as an optional additional prefix for the statistics emitted from `ext_authz` HTTP filter. -* ext_authz filter: added support for enabling the filter based on :ref:`dynamic metadata `. -* ext_authz filter: added support for letting the authorization server instruct Envoy to remove headers from the original request by setting the new field :ref:`headers_to_remove ` before forwarding it to the upstream. -* ext_authz filter: added support for sending :ref:`raw bytes as request body ` of a gRPC check request by setting :ref:`pack_as_bytes ` to true. -* ext_authz_filter: added :ref:`disable_request_body_buffering ` to disable request data buffering per-route. +* dns_filter: added support for answering :ref:`service record ` queries. +* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups ` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters `. +* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. + The emitted dynamic metadata is set by :ref:`dynamic metadata ` field in a returned :ref:`CheckResponse `. +* ext_authz filter: added :ref:`stat_prefix ` as an optional additional prefix for the statistics emitted from `ext_authz` HTTP filter. +* ext_authz filter: added support for enabling the filter based on :ref:`dynamic metadata `. +* ext_authz filter: added support for letting the authorization server instruct Envoy to remove headers from the original request by setting the new field :ref:`headers_to_remove ` before forwarding it to the upstream. 
+* ext_authz filter: added support for sending :ref:`raw bytes as request body ` of a gRPC check request by setting :ref:`pack_as_bytes ` to true. +* ext_authz_filter: added :ref:`disable_request_body_buffering ` to disable request data buffering per-route. * grpc-json: support specifying ``response_body`` field in for ``google.api.HttpBody`` message. -* hds: added :ref:`cluster_endpoints_health ` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list. -* hds: added :ref:`transport_socket_matches ` to HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria ` in the repeated field :ref:`health_checks ` has context to match against. This unblocks support for health checks over HTTPS and HTTP/2. +* hds: added :ref:`cluster_endpoints_health ` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list. +* hds: added :ref:`transport_socket_matches ` to HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria ` in the repeated field :ref:`health_checks ` has context to match against. This unblocks support for health checks over HTTPS and HTTP/2. * hot restart: added :option:`--socket-path` and :option:`--socket-mode` to configure UDS path in the filesystem and set permission to it. -* http: added HTTP/2 support for :ref:`connection keepalive ` via PING. -* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. -* http: added :ref:`allow_chunked_length ` configuration option for HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers. If such message is served and option is enabled - per RFC Content-Length is ignored and removed. -* http: added :ref:`CDN Loop filter ` and :ref:`documentation `. 
-* http: added :ref:`MaxStreamDuration proto ` for configuring per-route downstream duration timeouts. +* http: added HTTP/2 support for :ref:`connection keepalive ` via PING. +* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. +* http: added :ref:`allow_chunked_length ` configuration option for HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers. If such message is served and option is enabled - per RFC Content-Length is ignored and removed. +* http: added :ref:`CDN Loop filter ` and :ref:`documentation `. +* http: added :ref:`MaxStreamDuration proto ` for configuring per-route downstream duration timeouts. * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default for HTTP/1.1 and HTTP/2 server connections. The new codecs can be enabled for testing by setting the runtime feature ``envoy.reloadable_features.new_codec_behavior`` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. * http: modified the HTTP header-map data-structure to use an underlying dictionary and a list (no change to the header-map API). To conform with previous versions, the use of a dictionary is currently disabled. It can be enabled by setting the ``envoy.http.headermap.lazy_map_min_size`` runtime feature to a non-negative number which defines the minimal number of headers in a request/response/trailers required for using a dictionary in addition to the list. Our current benchmarks suggest that the value 3 is a good threshold for most workloads. -* load balancer: added :ref:`RingHashLbConfig ` to configure the table size of Maglev consistent hash. 
-* load balancer: added a :ref:`configuration ` option to specify the active request bias used by the least request load balancer. -* load balancer: added an :ref:`option ` to optimize subset load balancing when there is only one host per subset. -* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor `. -* local_reply config: added :ref:`content_type ` field to set content-type. -* lua: added Lua APIs to access :ref:`SSL connection info ` object. -* lua: added Lua API for :ref:`base64 escaping a string `. -* lua: added Lua API for :ref:`setting the current buffer content `. -* lua: added new :ref:`source_code ` field to support the dispatching of inline Lua code in per route configuration of Lua filter. -* overload management: add :ref:`scaling ` trigger for OverloadManager actions. -* postgres network filter: :ref:`metadata ` is produced based on SQL query. -* proxy protocol: added support for generating the header upstream using :ref:`Proxy Protocol Transport Socket `. -* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. -* ratelimit: added :ref:`per route config ` for rate limit filter. -* ratelimit: added support for optional :ref:`descriptor_key ` to Generic Key action. +* load balancer: added :ref:`RingHashLbConfig ` to configure the table size of Maglev consistent hash. +* load balancer: added a :ref:`configuration ` option to specify the active request bias used by the least request load balancer. +* load balancer: added an :ref:`option ` to optimize subset load balancing when there is only one host per subset. +* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor `. +* local_reply config: added :ref:`content_type ` field to set content-type. +* lua: added Lua APIs to access :ref:`SSL connection info ` object. 
+* lua: added Lua API for :ref:`base64 escaping a string `. +* lua: added Lua API for :ref:`setting the current buffer content `. +* lua: added new :ref:`source_code ` field to support the dispatching of inline Lua code in per route configuration of Lua filter. +* overload management: add :ref:`scaling ` trigger for OverloadManager actions. +* postgres network filter: :ref:`metadata ` is produced based on SQL query. +* proxy protocol: added support for generating the header upstream using :ref:`Proxy Protocol Transport Socket `. +* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. +* ratelimit: added :ref:`per route config ` for rate limit filter. +* ratelimit: added support for optional :ref:`descriptor_key ` to Generic Key action. * rbac filter: added the name of the matched policy to the response code detail when a request is rejected by the RBAC filter. -* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. -* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. -* router: added a new :ref:`rate limited retry back off ` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval. +* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. +* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. +* router: added a new :ref:`rate limited retry back off ` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval. * router: added new - :ref:`envoy-ratelimited ` + :ref:`envoy-ratelimited ` retry policy, which allows retrying envoy's own rate limited responses. 
-* router: added new :ref:`host_rewrite_path_regex ` +* router: added new :ref:`host_rewrite_path_regex ` option, which allows rewriting Host header based on path. -* router: added support for DYNAMIC_METADATA :ref:`header formatter `. -* router_check_tool: added support for ``request_header_matches``, ``response_header_matches`` to :ref:`router check tool `. +* router: added support for DYNAMIC_METADATA :ref:`header formatter `. +* router_check_tool: added support for ``request_header_matches``, ``response_header_matches`` to :ref:`router check tool `. * signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers(). -* stats: added optional histograms to :ref:`cluster stats ` +* stats: added optional histograms to :ref:`cluster stats ` that track headers and body sizes of requests and responses. * stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. -* tap: added :ref:`generic body matcher ` to scan http requests and responses for text or hex patterns. -* tcp_proxy: added :ref:`max_downstream_connection_duration ` for downstream connection. When max duration is reached the connection will be closed. +* tap: added :ref:`generic body matcher ` to scan http requests and responses for text or hex patterns. +* tcp_proxy: added :ref:`max_downstream_connection_duration ` for downstream connection. When max duration is reached the connection will be closed. * tcp_proxy: allow earlier network filters to set metadataMatchCriteria on the connection StreamInfo to influence load balancing. -* tls: added OCSP stapling support through the :ref:`ocsp_staple ` and :ref:`ocsp_staple_policy ` configuration options. See :ref:`OCSP Stapling ` for usage and runtime flags. -* tls: introduce new :ref:`extension point ` for overriding :ref:`TLS handshaker ` behavior. 
+* tls: added OCSP stapling support through the :ref:`ocsp_staple ` and :ref:`ocsp_staple_policy ` configuration options. See :ref:`OCSP Stapling ` for usage and runtime flags. +* tls: introduce new :ref:`extension point ` for overriding :ref:`TLS handshaker ` behavior. * tls: switched from using socket BIOs to using custom BIOs that know how to interact with IoHandles. The feature can be disabled by setting runtime feature ``envoy.reloadable_features.tls_use_io_handle_bio`` to false. -* tracing: added ability to set some :ref:`optional segment fields ` in the AWS X-Ray tracer. -* udp_proxy: added :ref:`hash_policies ` to support hash based routing. -* udp_proxy: added :ref:`use_original_src_ip ` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter `. -* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximium jitter parameter :ref:`max_kill_timeout_jitter `. -* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions `. -* watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action `. -* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. -* xds: added :ref:`extension config discovery ` support for HTTP filters. +* tracing: added ability to set some :ref:`optional segment fields ` in the AWS X-Ray tracer. +* udp_proxy: added :ref:`hash_policies ` to support hash based routing. +* udp_proxy: added :ref:`use_original_src_ip ` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter `. +* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximium jitter parameter :ref:`max_kill_timeout_jitter `. 
+* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions `. +* watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action `. +* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. +* xds: added :ref:`extension config discovery ` support for HTTP filters. * xds: added support for mixed v2/v3 discovery response, which enable type url downgrade and upgrade. This feature is disabled by default and is controlled by runtime guard ``envoy.reloadable_features.enable_type_url_downgrade_and_upgrade``. * zlib: added option to use `zlib-ng `_ as zlib library. Deprecated ---------- -* build: alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image `. -* cluster: the :ref:`track_timeout_budgets ` - field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. -* ext_authz: the :ref:`dynamic metadata ` field in :ref:`OkHttpResponse ` has been deprecated in favor of :ref:`dynamic metadata ` field in :ref:`CheckResponse `. -* hds: the :ref:`endpoints_health ` - field has been deprecated in favor of :ref:`cluster_endpoints_health ` to maintain +* build: alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image `. +* cluster: the :ref:`track_timeout_budgets ` + field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. +* ext_authz: the :ref:`dynamic metadata ` field in :ref:`OkHttpResponse ` has been deprecated in favor of :ref:`dynamic metadata ` field in :ref:`CheckResponse `. +* hds: the :ref:`endpoints_health ` + field has been deprecated in favor of :ref:`cluster_endpoints_health ` to maintain grouping by cluster and locality. -* router: the :ref:`include_vh_rate_limits ` field has been deprecated in favor of :ref:`vh_rate_limits `. 
-* router: the :ref:`max_grpc_timeout ` field has been deprecated in favor of :ref:`grpc_timeout_header_max `. -* router: the :ref:`grpc_timeout_offset ` field has been deprecated in favor of :ref:`grpc_timeout_header_offset `. -* tap: the :ref:`match_config ` field has been deprecated in favor of - :ref:`match ` field. +* router: the :ref:`include_vh_rate_limits ` field has been deprecated in favor of :ref:`vh_rate_limits `. +* router: the :ref:`max_grpc_timeout ` field has been deprecated in favor of :ref:`grpc_timeout_header_max `. +* router: the :ref:`grpc_timeout_offset ` field has been deprecated in favor of :ref:`grpc_timeout_header_offset `. +* tap: the :ref:`match_config ` field has been deprecated in favor of + :ref:`match ` field. * router_check_tool: ``request_header_fields``, ``response_header_fields`` config deprecated in favor of ``request_header_matches``, ``response_header_matches``. -* watchdog: :ref:`watchdog ` deprecated in favor of :ref:`watchdogs `. +* watchdog: :ref:`watchdog ` deprecated in favor of :ref:`watchdogs `. diff --git a/docs/root/version_history/v1.16.3.rst b/docs/root/version_history/v1.16.3.rst index 125902e2f67da..cff413ee2be68 100644 --- a/docs/root/version_history/v1.16.3.rst +++ b/docs/root/version_history/v1.16.3.rst @@ -14,7 +14,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * aggregate cluster: fixed a crash due to a TLS initialization issue. -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. 
* http: reverting a behavioral change where upstream connect timeouts were temporarily treated differently from other connection failures. The change back to the original behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` to false. * lua: fixed crash when Lua script contains streamInfo():downstreamSslConnection(). @@ -24,7 +24,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.16.4.rst b/docs/root/version_history/v1.16.4.rst index 4864d77cc3a7b..ffd25009cf855 100644 --- a/docs/root/version_history/v1.16.4.rst +++ b/docs/root/version_history/v1.16.4.rst @@ -15,12 +15,12 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ -* http: added the ability to :ref:`unescape slash sequences` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. +* http: added the ability to :ref:`unescape slash sequences` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. 
This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. Deprecated ---------- diff --git a/docs/root/version_history/v1.16.5.rst b/docs/root/version_history/v1.16.5.rst index fcf1017a2cd41..f1b37a301d81b 100644 --- a/docs/root/version_history/v1.16.5.rst +++ b/docs/root/version_history/v1.16.5.rst @@ -25,7 +25,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.17.0.rst b/docs/root/version_history/v1.17.0.rst index 1d612e7573124..b3495a674164b 100644 --- a/docs/root/version_history/v1.17.0.rst +++ b/docs/root/version_history/v1.17.0.rst @@ -14,19 +14,19 @@ Minor Behavior Changes * build: the Alpine based debug images are no longer built in CI, use Ubuntu based images instead. * decompressor: set the default value of window_bits of the decompressor to 15 to be able to decompress responses compressed by a compressor with any window size. * expr filter: added ``connection.termination_details`` property support. -* formatter: the :ref:`text_format ` field no longer requires at least one byte, and may now be the empty string. It has also become :ref:`deprecated `. +* formatter: the :ref:`text_format ` field no longer requires at least one byte, and may now be the empty string. It has also become :ref:`deprecated `. * grpc_web filter: if a ``grpc-accept-encoding`` header is present it's passed as-is to the upstream and if it isn't ``grpc-accept-encoding:identity`` is sent instead. The header was always overwriten with ``grpc-accept-encoding:identity,deflate,gzip`` before. * http: upstream protocol will now only be logged if an upstream stream was established. 
-* jwt_authn filter: added support of JWT time constraint verification with a clock skew (default to 60 seconds) and added a filter config field :ref:`clock_skew_seconds ` to configure it. -* listener: injection of the :ref:`TLS inspector ` has been disabled by default. This feature is controlled by the runtime guard ``envoy.reloadable_features.disable_tls_inspector_injection``. -* lua: added `always_wrap_body` argument to `body()` API to always return a :ref:`buffer object ` even if the body is empty. +* jwt_authn filter: added support of JWT time constraint verification with a clock skew (default to 60 seconds) and added a filter config field :ref:`clock_skew_seconds ` to configure it. +* listener: injection of the :ref:`TLS inspector ` has been disabled by default. This feature is controlled by the runtime guard ``envoy.reloadable_features.disable_tls_inspector_injection``. +* lua: added `always_wrap_body` argument to `body()` API to always return a :ref:`buffer object ` even if the body is empty. * memory: enabled new tcmalloc with restartable sequences for aarch64 builds. * mongo proxy metrics: swapped network connection remote and local closed counters previously set reversed (``cx_destroy_local_with_active_rq`` and ``cx_destroy_remote_with_active_rq``). -* outlier detection: added :ref:`max_ejection_time ` to limit ejection time growth when a node stays unhealthy for extended period of time. By default :ref:`max_ejection_time ` limits ejection time to 5 minutes. Additionally, when the node stays healthy, ejection time decreases. See :ref:`ejection algorithm ` for more info. Previously, ejection time could grow without limit and never decreased. +* outlier detection: added :ref:`max_ejection_time ` to limit ejection time growth when a node stays unhealthy for extended period of time. By default :ref:`max_ejection_time ` limits ejection time to 5 minutes. Additionally, when the node stays healthy, ejection time decreases. 
See :ref:`ejection algorithm ` for more info. Previously, ejection time could grow without limit and never decreased. * performance: improved performance when handling large HTTP/1 bodies. * tcp_proxy: now waits for HTTP tunnel to be established before start streaming the downstream data, the runtime guard ``envoy.reloadable_features.http_upstream_wait_connect_response`` can be set to "false" to disable this behavior. * tls: removed RSA key transport and SHA-1 cipher suites from the client-side defaults. -* watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. +* watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. * xds: to support TTLs, heartbeating has been added to xDS. As a result, responses that contain empty resources without updating the version will no longer be propagated to the subscribers. To undo this for VHDS (which is the only subscriber that wants empty resources), the ``envoy.reloadable_features.vhds_heartbeats`` can be set to "false". @@ -34,10 +34,10 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* config: validate that upgrade configs have a non-empty :ref:`upgrade_type `, fixing a bug where an errant "-" could result in unexpected behavior. +* config: validate that upgrade configs have a non-empty :ref:`upgrade_type `, fixing a bug where an errant "-" could result in unexpected behavior. * dns: fixed a bug where custom resolvers provided in configuration were not preserved after network issues. * dns_filter: correctly associate DNS response IDs when multiple queries are received. -* grpc mux: fixed sending node again after stream is reset when :ref:`set_node_on_first_message_only ` is set. 
+* grpc mux: fixed sending node again after stream is reset when :ref:`set_node_on_first_message_only ` is set. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. * http: reject requests with missing required headers after filter chain processing. * http: sending CONNECT_ERROR for HTTP/2 where appropriate during CONNECT requests. @@ -51,70 +51,70 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * dispatcher: removed legacy socket read/write resumption code path and runtime guard ``envoy.reloadable_features.activate_fds_next_event_loop``. -* ext_authz: removed auto ignore case in HTTP-based ``ext_authz`` header matching and the runtime guard ``envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher``. To ignore case, set the :ref:`ignore_case ` field to true. +* ext_authz: removed auto ignore case in HTTP-based ``ext_authz`` header matching and the runtime guard ``envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher``. To ignore case, set the :ref:`ignore_case ` field to true. * ext_authz: the deprecated field ``use_alpha`` is no longer supported and cannot be set anymore. * http: removed ``envoy.reloadable_features.http1_flood_protection`` and legacy code path for turning flood protection off. * http: removed ``envoy.reloadable_features.new_codec_behavior`` and legacy codecs. New Features ------------ -* compression: the :ref:`compressor ` filter added support for compressing request payloads. Its configuration is unified with the :ref:`decompressor ` filter with two new fields for different directions - :ref:`requests ` and :ref:`responses `. The latter deprecates the old response-specific fields and, if used, roots the response-specific stats in `.compressor...response.*` instead of `.compressor...*`. 
-* config: added ability to flush stats when the admin's :ref:`/stats endpoint ` is hit instead of on a timer via :ref:`stats_flush_on_admin `. +* compression: the :ref:`compressor ` filter added support for compressing request payloads. Its configuration is unified with the :ref:`decompressor ` filter with two new fields for different directions - :ref:`requests ` and :ref:`responses `. The latter deprecates the old response-specific fields and, if used, roots the response-specific stats in `.compressor...response.*` instead of `.compressor...*`. +* config: added ability to flush stats when the admin's :ref:`/stats endpoint ` is hit instead of on a timer via :ref:`stats_flush_on_admin `. * config: added new runtime feature ``envoy.features.enable_all_deprecated_features`` that allows the use of all deprecated features. * crash support: added the ability to dump L4 connection data on crash. -* formatter: added new :ref:`text_format_source ` field to support format strings both inline and from a file. -* formatter: added support for custom date formatting to :ref:`%DOWNSTREAM_PEER_CERT_V_START% ` and :ref:`%DOWNSTREAM_PEER_CERT_V_END% `, similar to :ref:`%START_TIME% `. -* grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. -* grpc-json: added support for configuring :ref:`unescaping behavior ` for path components. -* hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. -* health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. -* http: added HCM :ref:`request_headers_timeout config field ` to control how long a downstream has to finish sending headers before the stream is cancelled. 
+* formatter: added new :ref:`text_format_source ` field to support format strings both inline and from a file. +* formatter: added support for custom date formatting to :ref:`%DOWNSTREAM_PEER_CERT_V_START% ` and :ref:`%DOWNSTREAM_PEER_CERT_V_END% `, similar to :ref:`%START_TIME% `. +* grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. +* grpc-json: added support for configuring :ref:`unescaping behavior ` for path components. +* hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. +* health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. +* http: added HCM :ref:`request_headers_timeout config field ` to control how long a downstream has to finish sending headers before the stream is cancelled. * http: added frame flood and abuse checks to the upstream HTTP/2 codec. This check is off by default and can be enabled by setting the ``envoy.reloadable_features.upstream_http2_flood_checks`` runtime key to true. -* http: added :ref:`stripping any port from host header ` support. -* http: clusters added support for selecting HTTP/1 or HTTP/2 based on ALPN, configurable via :ref:`alpn_config ` in the :ref:`http_protocol_options ` message. -* jwt_authn: added support for :ref:`per-route config `. -* jwt_authn: changed config field :ref:`issuer ` to be optional to comply with JWT `RFC `_ requirements. -* kill_request: added new :ref:`HTTP kill request filter `. -* listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection. -* listener: added back the :ref:`use_original_dst field `. 
-* listener: added the :ref:`Listener.bind_to_port field `. +* http: added :ref:`stripping any port from host header ` support. +* http: clusters added support for selecting HTTP/1 or HTTP/2 based on ALPN, configurable via :ref:`alpn_config ` in the :ref:`http_protocol_options ` message. +* jwt_authn: added support for :ref:`per-route config `. +* jwt_authn: changed config field :ref:`issuer ` to be optional to comply with JWT `RFC `_ requirements. +* kill_request: added new :ref:`HTTP kill request filter `. +* listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection. +* listener: added back the :ref:`use_original_dst field `. +* listener: added the :ref:`Listener.bind_to_port field `. * log: added a new custom flag ``%_`` to the log pattern to print the actual message to log, but with escaped newlines. -* lua: added `downstreamDirectRemoteAddress()` and `downstreamLocalAddress()` APIs to :ref:`streamInfo() `. -* mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. -* network: added a :ref:`transport_socket_connect_timeout config field ` for incoming connections completing transport-level negotiation, including TLS and ALTS hanshakes. -* overload: added :ref:`envoy.overload_actions.reduce_timeouts ` overload action to enable scaling timeouts down with load. Scaling support :ref:`is limited ` to the HTTP connection and stream idle timeouts. -* ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. -* ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. -* ratelimit: added :ref:`body ` field to support custom response bodies for non-OK responses from the external ratelimit service. -* ratelimit: added :ref:`descriptor extensions `. -* ratelimit: added :ref:`computed descriptors `. 
-* ratelimit: added :ref:`dynamic_metadata ` field to support setting dynamic metadata from the ratelimit service. -* router: added support for regex rewrites during HTTP redirects using :ref:`regex_rewrite `. -* sds: improved support for atomic :ref:`key rotations ` and added configurable rotation triggers for - :ref:`TlsCertificate ` and - :ref:`CertificateValidationContext `. -* signal: added an extension point for custom actions to run on the thread that has encountered a fatal error. Actions are configurable via :ref:`fatal_actions `. -* start_tls: added new :ref:`transport socket ` which starts in clear-text but may programatically be converted to use tls. -* tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. -* thrift_proxy: added a new :ref:`payload_passthrough ` option to skip decoding body in the Thrift message. +* lua: added `downstreamDirectRemoteAddress()` and `downstreamLocalAddress()` APIs to :ref:`streamInfo() `. +* mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. +* network: added a :ref:`transport_socket_connect_timeout config field ` for incoming connections completing transport-level negotiation, including TLS and ALTS hanshakes. +* overload: added :ref:`envoy.overload_actions.reduce_timeouts ` overload action to enable scaling timeouts down with load. Scaling support :ref:`is limited ` to the HTTP connection and stream idle timeouts. +* ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. +* ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. +* ratelimit: added :ref:`body ` field to support custom response bodies for non-OK responses from the external ratelimit service. +* ratelimit: added :ref:`descriptor extensions `. +* ratelimit: added :ref:`computed descriptors `. 
+* ratelimit: added :ref:`dynamic_metadata ` field to support setting dynamic metadata from the ratelimit service. +* router: added support for regex rewrites during HTTP redirects using :ref:`regex_rewrite `. +* sds: improved support for atomic :ref:`key rotations ` and added configurable rotation triggers for + :ref:`TlsCertificate ` and + :ref:`CertificateValidationContext `. +* signal: added an extension point for custom actions to run on the thread that has encountered a fatal error. Actions are configurable via :ref:`fatal_actions `. +* start_tls: added new :ref:`transport socket ` which starts in clear-text but may programatically be converted to use tls. +* tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. +* thrift_proxy: added a new :ref:`payload_passthrough ` option to skip decoding body in the Thrift message. * tls: added support for RSA certificates with 4096-bit keys in FIPS mode. -* tracing: added :ref:`SkyWalking tracer `. -* tracing: added support for setting the hostname used when sending spans to a Zipkin collector using the :ref:`collector_hostname ` field. -* xds: added support for resource TTLs. A TTL is specified on the :ref:`Resource `. For SotW, a :ref:`Resource ` can be embedded in the list of resources to specify the TTL. +* tracing: added :ref:`SkyWalking tracer `. +* tracing: added support for setting the hostname used when sending spans to a Zipkin collector using the :ref:`collector_hostname ` field. +* xds: added support for resource TTLs. A TTL is specified on the :ref:`Resource `. For SotW, a :ref:`Resource ` can be embedded in the list of resources to specify the TTL. .. _1_17_deprecated: Deprecated ---------- -* cluster: HTTP configuration for upstream clusters has been reworked. HTTP-specific configuration is now done in the new :ref:`http_protocol_options ` message, configured via the cluster's :ref:`extension_protocol_options `. 
This replaces explicit HTTP configuration in cluster config, including :ref:`upstream_http_protocol_options ` :ref:`common_http_protocol_options ` :ref:`http_protocol_options ` :ref:`http2_protocol_options ` and :ref:`protocol_selection `. Examples of before-and-after configuration can be found in the :ref:`http_protocol_options docs ` and all of Envoy's example configurations have been updated to the new style of config. -* compression: the fields :ref:`content_length `, :ref:`content_type `, :ref:`disable_on_etag_header `, :ref:`remove_accept_encoding_header ` and :ref:`runtime_enabled ` of the :ref:`Compressor ` message have been deprecated in favor of :ref:`response_direction_config `. -* formatter: :ref:`text_format ` is now deprecated in favor of :ref:`text_format_source `. To migrate existing text format strings, use the :ref:`inline_string ` field. -* gzip: :ref:`HTTP Gzip filter ` is rejected now unless explicitly allowed with :ref:`runtime override ` ``envoy.deprecated_features.allow_deprecated_gzip_http_filter`` set to `true`. Use the :ref:`compressor filter `. -* listener: :ref:`use_proxy_proto ` has been deprecated in favor of adding a :ref:`PROXY protocol listener filter ` explicitly. +* cluster: HTTP configuration for upstream clusters has been reworked. HTTP-specific configuration is now done in the new :ref:`http_protocol_options ` message, configured via the cluster's :ref:`extension_protocol_options `. This replaces explicit HTTP configuration in cluster config, including :ref:`upstream_http_protocol_options ` :ref:`common_http_protocol_options ` :ref:`http_protocol_options ` :ref:`http2_protocol_options ` and :ref:`protocol_selection `. Examples of before-and-after configuration can be found in the :ref:`http_protocol_options docs ` and all of Envoy's example configurations have been updated to the new style of config. 
+* compression: the fields :ref:`content_length `, :ref:`content_type `, :ref:`disable_on_etag_header `, :ref:`remove_accept_encoding_header ` and :ref:`runtime_enabled ` of the :ref:`Compressor ` message have been deprecated in favor of :ref:`response_direction_config `. +* formatter: :ref:`text_format ` is now deprecated in favor of :ref:`text_format_source `. To migrate existing text format strings, use the :ref:`inline_string ` field. +* gzip: :ref:`HTTP Gzip filter ` is rejected now unless explicitly allowed with :ref:`runtime override ` ``envoy.deprecated_features.allow_deprecated_gzip_http_filter`` set to `true`. Use the :ref:`compressor filter `. +* listener: :ref:`use_proxy_proto ` has been deprecated in favor of adding a :ref:`PROXY protocol listener filter ` explicitly. * logging: the ``--log-format-prefix-with-location`` option is removed. -* ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. +* ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. * stats: the ``--use-fake-symbol-table`` option is removed. -* tracing: OpenCensus :ref:`Zipkin configuration ` is now deprecated, the preferred Zipkin export is via Envoy's :ref:`native Zipkin tracer `. +* tracing: OpenCensus :ref:`Zipkin configuration ` is now deprecated, the preferred Zipkin export is via Envoy's :ref:`native Zipkin tracer `. 
diff --git a/docs/root/version_history/v1.17.1.rst b/docs/root/version_history/v1.17.1.rst index 35429d90da7bc..c5caa57673719 100644 --- a/docs/root/version_history/v1.17.1.rst +++ b/docs/root/version_history/v1.17.1.rst @@ -18,7 +18,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.17.2.rst b/docs/root/version_history/v1.17.2.rst index 570447c67a045..07a9947e1e9fc 100644 --- a/docs/root/version_history/v1.17.2.rst +++ b/docs/root/version_history/v1.17.2.rst @@ -13,14 +13,14 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: reverting a behavioral change where upstream connect timeouts were temporarily treated differently from other connection failures. The change back to the original behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` to false. * tls: fix a crash when peer sends a TLS Alert with an unknown code. 
Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.17.3.rst b/docs/root/version_history/v1.17.3.rst index 95fff3704c8ec..ee7fdd27fa72d 100644 --- a/docs/root/version_history/v1.17.3.rst +++ b/docs/root/version_history/v1.17.3.rst @@ -15,11 +15,11 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. 
Deprecated ---------- diff --git a/docs/root/version_history/v1.17.4.rst b/docs/root/version_history/v1.17.4.rst index 2535eb913a57c..1ef66c482868e 100644 --- a/docs/root/version_history/v1.17.4.rst +++ b/docs/root/version_history/v1.17.4.rst @@ -27,7 +27,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.18.0.rst b/docs/root/version_history/v1.18.0.rst index 70b32883e77c9..254c4cbab0acd 100644 --- a/docs/root/version_history/v1.18.0.rst +++ b/docs/root/version_history/v1.18.0.rst @@ -6,38 +6,38 @@ Incompatible Behavior Changes *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* * config: the v2 xDS API is no longer supported by the Envoy binary. -* grpc_stats: the default value for :ref:`stats_for_all_methods ` is switched from true to false, in order to avoid possible memory exhaustion due to an untrusted downstream sending a large number of unique method names. The previous default value was deprecated in version 1.14.0. This only changes the behavior when the value is not set. The previous behavior can be used by setting the value to true. This behavior change by be overridden by setting runtime feature ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default``. +* grpc_stats: the default value for :ref:`stats_for_all_methods ` is switched from true to false, in order to avoid possible memory exhaustion due to an untrusted downstream sending a large number of unique method names. The previous default value was deprecated in version 1.14.0. This only changes the behavior when the value is not set. The previous behavior can be used by setting the value to true. 
This behavior change by be overridden by setting runtime feature ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default``. * http: fixing a standards compliance issue with :scheme. The :scheme header sent upstream is now based on the original URL scheme, rather than set based on the security of the upstream connection. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.preserve_downstream_scheme`` to false. * http: http3 is now enabled/disabled via build option ``--define http3=disabled`` rather than the extension framework. The behavior is the same, but builds may be affected for platforms or build configurations where http3 is not supported. -* http: resolving inconsistencies between :scheme and X-Forwarded-Proto. :scheme will now be set for all HTTP/1.1 requests. This changes the behavior of the gRPC access logger, Wasm filters, CSRF filter and oath2 filter for HTTP/1 traffic, where :scheme was previously not set. This change also validates that for front-line Envoys (Envoys configured with :ref:`xff_num_trusted_hops ` set to 0 and :ref:`use_remote_address ` set to true) that HTTP/1.1 https schemed requests can not be sent over non-TLS connections. All behavioral changes listed here can be temporarily reverted by setting ``envoy.reloadable_features.add_and_validate_scheme_header`` to false. +* http: resolving inconsistencies between :scheme and X-Forwarded-Proto. :scheme will now be set for all HTTP/1.1 requests. This changes the behavior of the gRPC access logger, Wasm filters, CSRF filter and oath2 filter for HTTP/1 traffic, where :scheme was previously not set. This change also validates that for front-line Envoys (Envoys configured with :ref:`xff_num_trusted_hops ` set to 0 and :ref:`use_remote_address ` set to true) that HTTP/1.1 https schemed requests can not be sent over non-TLS connections. 
All behavioral changes listed here can be temporarily reverted by setting ``envoy.reloadable_features.add_and_validate_scheme_header`` to false. * http: when a protocol error is detected in response from upstream, Envoy sends 502 BadGateway downstream and access log entry contains UPE flag. This behavior change can be overwritten to use error code 503 by setting ``envoy.reloadable_features.return_502_for_upstream_protocol_errors`` to false. Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* access_logs: change command operator %UPSTREAM_CLUSTER% to resolve to :ref:`alt_stat_name ` if provided. This behavior can be reverted by disabling the runtime feature ``envoy.reloadable_features.use_observable_cluster_name``. +* access_logs: change command operator %UPSTREAM_CLUSTER% to resolve to :ref:`alt_stat_name ` if provided. This behavior can be reverted by disabling the runtime feature ``envoy.reloadable_features.use_observable_cluster_name``. * access_logs: fix substition formatter to recognize commands ending with an integer such as DOWNSTREAM_PEER_FINGERPRINT_256. * access_logs: set the error flag ``NC`` for ``no cluster found`` instead of ``NR`` if the route is found but the corresponding cluster is not available. -* admin: added :ref:`observability_name ` information to GET /clusters?format=json :ref:`cluster status `. -* dns: both the :ref:`strict DNS ` and - :ref:`logical DNS ` cluster types now honor the - :ref:`hostname ` field if not empty. +* admin: added :ref:`observability_name ` information to GET /clusters?format=json :ref:`cluster status `. +* dns: both the :ref:`strict DNS ` and + :ref:`logical DNS ` cluster types now honor the + :ref:`hostname ` field if not empty. Previously resolved hosts would have their hostname set to the configured DNS address for use with - logging, :ref:`auto_host_rewrite `, etc. + logging, :ref:`auto_host_rewrite `, etc. 
Setting the hostname manually allows overriding the internal hostname used for such features while still allowing the original DNS resolution name to be used. * grpc_json_transcoder: the filter now adheres to encoder and decoder buffer limits. Requests and responses that require buffering over the limits will be directly rejected. The behavior can be reverted by disabling runtime feature ``envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits``. - To reduce or increase the buffer limits the filter adheres to, reference the :ref:`flow control documentation `. -* hds: support custom health check port via :ref:`health_check_config `. -* healthcheck: the :ref:`health check filter ` now sends the - :ref:`x-envoy-immediate-health-check-fail ` header + To reduce or increase the buffer limits the filter adheres to, reference the :ref:`flow control documentation `. +* hds: support custom health check port via :ref:`health_check_config `. +* healthcheck: the :ref:`health check filter ` now sends the + :ref:`x-envoy-immediate-health-check-fail ` header for all responses when Envoy is in the health check failed state. Additionally, receiving the - :ref:`x-envoy-immediate-health-check-fail ` - header (either in response to normal traffic or in response to an HTTP :ref:`active health check `) will - cause Envoy to immediately :ref:`exclude ` the host from + :ref:`x-envoy-immediate-health-check-fail ` + header (either in response to normal traffic or in response to an HTTP :ref:`active health check `) will + cause Envoy to immediately :ref:`exclude ` the host from load balancing calculations. This has the useful property that such hosts, which are being explicitly told to disable traffic, will not be counted for panic routing calculations. See the excluded documentation for more information. This behavior can be temporarily reverted by setting @@ -58,12 +58,12 @@ Minor Behavior Changes initial HEADERS frame for the new stream. 
Before the counter was incrementred when Envoy received response HEADERS frame with the END_HEADERS flag set from upstream server. * lua: added function ``timestamp`` to provide millisecond resolution timestamps by passing in ``EnvoyTimestampResolution.MILLISECOND``. -* oauth filter: added the optional parameter :ref:`auth_scopes ` with default value of 'user' if not provided. This allows this value to be overridden in the Authorization request to the OAuth provider. +* oauth filter: added the optional parameter :ref:`auth_scopes ` with default value of 'user' if not provided. This allows this value to be overridden in the Authorization request to the OAuth provider. * perf: allow reading more bytes per operation from raw sockets to improve performance. -* router: extended custom date formatting to DOWNSTREAM_PEER_CERT_V_START and DOWNSTREAM_PEER_CERT_V_END when using :ref:`custom request/response header formats `. +* router: extended custom date formatting to DOWNSTREAM_PEER_CERT_V_START and DOWNSTREAM_PEER_CERT_V_END when using :ref:`custom request/response header formats `. * router: made the path rewrite available without finalizing headers, so the filter could calculate the current value of the final url. -* tracing: added ``upstream_cluster.name`` tag that resolves to resolve to :ref:`alt_stat_name ` if provided (and otherwise the cluster name). -* udp: configuration has been added for :ref:`GRO ` +* tracing: added ``upstream_cluster.name`` tag that resolves to resolve to :ref:`alt_stat_name ` if provided (and otherwise the cluster name). +* udp: configuration has been added for :ref:`GRO ` which used to be force enabled if the OS supports it. The default is now disabled for server sockets and enabled for client sockets (see the new features section for links). 
* upstream: host weight changes now cause a full load balancer rebuild as opposed to happening @@ -80,9 +80,9 @@ Bug Fixes * active http health checks: properly handles HTTP/2 GOAWAY frames from the upstream. Previously a GOAWAY frame due to a graceful listener drain could cause improper failed health checks due to streams being refused by the upstream on a connection that is going away. To revert to old GOAWAY handling behavior, set the runtime feature ``envoy.reloadable_features.health_check.graceful_goaway_handling`` to false. * adaptive concurrency: fixed a bug where concurrent requests on different worker threads could update minRTT back-to-back. * buffer: tighten network connection read and write buffer high watermarks in preparation to more careful enforcement of read limits. Buffer high-watermark is now set to the exact configured value; previously it was set to value + 1. -* cdn_loop: check that the entirety of the :ref:`cdn_id ` field is a valid CDN identifier. +* cdn_loop: check that the entirety of the :ref:`cdn_id ` field is a valid CDN identifier. * cds: fix blocking the update for a warming cluster when the update is the same as the active version. -* ext_authz: emit :ref:`CheckResponse.dynamic_metadata ` when the external authorization response has "Denied" check status. +* ext_authz: emit :ref:`CheckResponse.dynamic_metadata ` when the external authorization response has "Denied" check status. * fault injection: stop counting as active fault after delay elapsed. Previously fault injection filter continues to count the injected delay as an active fault even after it has elapsed. This produces incorrect output statistics and impacts the max number of consecutive faults allowed (e.g., for long-lived streams). This change decreases the active fault count when the delay fault is the only active and has gone finished. * filter_chain: fix filter chain matching with the server name as the case-insensitive way. 
* grpc-web: fix local reply and non-proto-encoded gRPC response handling for small response bodies. This fix can be temporarily reverted by setting ``envoy.reloadable_features.grpc_web_fix_non_proto_encoded_response_handling`` to false. @@ -91,8 +91,8 @@ Bug Fixes * http: avoid grpc-status overwrite on when sending local replies if that field has already been set. * http: disallowing "host:" in request_headers_to_add for behavioral consistency with rejecting :authority header. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_host_like_authority`` to false. * http: fixed an issue where Envoy did not handle peer stream limits correctly, and queued streams in nghttp2 rather than establish new connections. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.improved_stream_limit_handling`` to false. -* http: fixed a bug where setting :ref:`MaxStreamDuration proto ` did not disable legacy timeout defaults. -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a bug where setting :ref:`MaxStreamDuration proto ` did not disable legacy timeout defaults. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: reverting a behavioral change where upstream connect timeouts were temporarily treated differently from other connection failures. The change back to the original behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` to false. * jwt_authn: reject requests with a proper error if JWT has the wrong issuer when allow_missing is used. 
Before this change, the requests are accepted. @@ -109,7 +109,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * access_logs: removed legacy unbounded access logs and runtime guard ``envoy.reloadable_features.disallow_unbounded_access_logs``. * dns: removed legacy buggy wildcard matching path and runtime guard ``envoy.reloadable_features.fix_wildcard_matching``. @@ -127,68 +127,68 @@ Removed Config or Runtime New Features ------------ -* access log: added a new :ref:`OpenTelemetry access logger ` extension, allowing a flexible log structure with native Envoy access log formatting. +* access log: added a new :ref:`OpenTelemetry access logger ` extension, allowing a flexible log structure with native Envoy access log formatting. * access log: added the new response flag ``NC`` for upstream cluster not found. The error flag is set when the http or tcp route is found for the request but the cluster is not available. -* access log: added the :ref:`formatters ` extension point for custom formatters (command operators). -* access log: added support for cross platform writing to :ref:`standard output ` and :ref:`standard error `. +* access log: added the :ref:`formatters ` extension point for custom formatters (command operators). +* access log: added support for cross platform writing to :ref:`standard output ` and :ref:`standard error `. * access log: support command operator: %FILTER_CHAIN_NAME% for the downstream tcp and http request. * access log: support command operator: %REQUEST_HEADERS_BYTES%, %RESPONSE_HEADERS_BYTES%, and %RESPONSE_TRAILERS_BYTES%. -* admin: added support for :ref:`access loggers ` to the admin interface. -* composite filter: added new :ref:`composite filter ` that can be used to instantiate different filter configuratios based on matching incoming data. 
-* compression: add brotli :ref:`compressor ` and :ref:`decompressor `. +* admin: added support for :ref:`access loggers ` to the admin interface. +* composite filter: added new :ref:`composite filter ` that can be used to instantiate different filter configuratios based on matching incoming data. +* compression: add brotli :ref:`compressor ` and :ref:`decompressor `. * compression: extended the compression allow compressing when the content length header is not present. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.enable_compression_without_content_length_header`` to false. * config: add ``envoy.features.fail_on_any_deprecated_feature`` runtime key, which matches the behaviour of compile-time flag ``ENVOY_DISABLE_DEPRECATED_FEATURES``, i.e. use of deprecated fields will cause a crash. -* config: the ``Node`` :ref:`dynamic context parameters ` are populated in discovery requests when set on the server instance. +* config: the ``Node`` :ref:`dynamic context parameters ` are populated in discovery requests when set on the server instance. * dispatcher: supports a stack of ``Envoy::ScopeTrackedObject`` instead of a single tracked object. This will allow Envoy to dump more debug information on crash. -* ext_authz: added :ref:`response_headers_to_add ` to support sending response headers to downstream clients on OK authorization checks via gRPC. -* ext_authz: added :ref:`allowed_client_headers_on_success ` to support sending response headers to downstream clients on OK external authorization checks via HTTP. -* grpc_json_transcoder: added :ref:`request_validation_options ` to reject invalid requests early. +* ext_authz: added :ref:`response_headers_to_add ` to support sending response headers to downstream clients on OK authorization checks via gRPC. +* ext_authz: added :ref:`allowed_client_headers_on_success ` to support sending response headers to downstream clients on OK external authorization checks via HTTP. 
+* grpc_json_transcoder: added :ref:`request_validation_options ` to reject invalid requests early. * grpc_json_transcoder: filter can now be configured on per-route/per-vhost level as well. Leaving empty list of services in the filter configuration disables transcoding on the specific route. * http: added support for ``Envoy::ScopeTrackedObject`` for HTTP/1 and HTTP/2 dispatching. Crashes while inside the dispatching loop should dump debug information. Furthermore, HTTP/1 and HTTP/2 clients now dumps the originating request whose response from the upstream caused Envoy to crash. -* http: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic, especially if using HTTP/1.1. -* http: added support for stream filters to mutate the cached route set by HCM route resolution. Useful for filters in a filter chain that want to override specific methods/properties of a route. See :ref:`http route mutation ` docs for more information. +* http: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic, especially if using HTTP/1.1. +* http: added support for stream filters to mutate the cached route set by HCM route resolution. Useful for filters in a filter chain that want to override specific methods/properties of a route. See :ref:`http route mutation ` docs for more information. * http: added new runtime config ``envoy.reloadable_features.check_unsupported_typed_per_filter_config``, the default value is true. When the value is true, envoy will reject virtual host-specific typed per filter config when the filter doesn't support it. -* http: added the ability to preserve HTTP/1 header case across the proxy. See the :ref:`header casing ` documentation for more information. +* http: added the ability to preserve HTTP/1 header case across the proxy. See the :ref:`header casing ` documentation for more information. 
* http: change frame flood and abuse checks to the upstream HTTP/2 codec to ON by default. It can be disabled by setting the ``envoy.reloadable_features.upstream_http2_flood_checks`` runtime key to false. -* http: hash multiple header values instead of only hash the first header value. It can be disabled by setting the ``envoy.reloadable_features.hash_multiple_header_values`` runtime key to false. See the :ref:`HashPolicy's Header configuration ` for more information. +* http: hash multiple header values instead of only hash the first header value. It can be disabled by setting the ``envoy.reloadable_features.hash_multiple_header_values`` runtime key to false. See the :ref:`HashPolicy's Header configuration ` for more information. * json: introduced new JSON parser (https://github.com/nlohmann/json) to replace RapidJSON. The new parser is disabled by default. To test the new RapidJSON parser, enable the runtime feature ``envoy.reloadable_features.remove_legacy_json``. -* kill_request: :ref:`Kill Request ` now supports bidirection killing. -* listener: added an optional :ref:`stat_prefix `. -* loadbalancer: added the ability to specify the hash_key for a host when using a consistent hashing loadbalancer (ringhash, maglev) using the :ref:`LbEndpoint.Metadata ` e.g.: ``"envoy.lb": {"hash_key": "..."}``. +* kill_request: :ref:`Kill Request ` now supports bidirection killing. +* listener: added an optional :ref:`stat_prefix `. +* loadbalancer: added the ability to specify the hash_key for a host when using a consistent hashing loadbalancer (ringhash, maglev) using the :ref:`LbEndpoint.Metadata ` e.g.: ``"envoy.lb": {"hash_key": "..."}``. * log: added a new custom flag ``%j`` to the log pattern to print the actual message to log as JSON escaped string. -* oauth filter: added the optional parameter :ref:`resources `. Set this value to add multiple "resource" parameters in the Authorization request sent to the OAuth provider. 
This acts as an identifier representing the protected resources the client is requesting a token for. -* original_dst: added support for :ref:`Original Destination ` on Windows. This enables the use of Envoy as a sidecar proxy on Windows. -* overload: add support for scaling :ref:`transport connection timeouts `. This can be used to reduce the TLS handshake timeout in response to overload. -* postgres: added ability to :ref:`terminate SSL `. -* rbac: added :ref:`shadow_rules_stat_prefix ` to allow adding custom prefix to the stats emitted by shadow rules. -* route config: added :ref:`allow_post field ` for allowing POST payload as raw TCP. -* route config: added :ref:`max_direct_response_body_size_bytes ` to set maximum :ref:`direct response body ` size in bytes. If not specified the default remains 4096 bytes. -* server: added *fips_mode* to :ref:`server compilation settings ` related statistic. +* oauth filter: added the optional parameter :ref:`resources `. Set this value to add multiple "resource" parameters in the Authorization request sent to the OAuth provider. This acts as an identifier representing the protected resources the client is requesting a token for. +* original_dst: added support for :ref:`Original Destination ` on Windows. This enables the use of Envoy as a sidecar proxy on Windows. +* overload: add support for scaling :ref:`transport connection timeouts `. This can be used to reduce the TLS handshake timeout in response to overload. +* postgres: added ability to :ref:`terminate SSL `. +* rbac: added :ref:`shadow_rules_stat_prefix ` to allow adding custom prefix to the stats emitted by shadow rules. +* route config: added :ref:`allow_post field ` for allowing POST payload as raw TCP. +* route config: added :ref:`max_direct_response_body_size_bytes ` to set maximum :ref:`direct response body ` size in bytes. If not specified the default remains 4096 bytes. +* server: added *fips_mode* to :ref:`server compilation settings ` related statistic. 
* server: added :option:`--enable-core-dump` flag to enable core dumps via prctl (Linux-based systems only). -* tcp_proxy: add support for converting raw TCP streams into HTTP/1.1 CONNECT requests. See :ref:`upgrade documentation ` for details. -* tcp_proxy: added a :ref:`use_post field ` for using HTTP POST to proxy TCP streams. -* tcp_proxy: added a :ref:`headers_to_add field ` for setting additional headers to the HTTP requests for TCP proxing. -* thrift_proxy: added a :ref:`max_requests_per_connection field ` for setting maximum requests for per downstream connection. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for messagetype counters in request/response. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request time histograms. -* tls peer certificate validation: added :ref:`SPIFFE validator ` for supporting isolated multiple trust bundles in a single listener or cluster. -* tracing: added the :ref:`pack_trace_reason ` - field as well as explicit configuration for the built-in :ref:`UuidRequestIdConfig ` +* tcp_proxy: add support for converting raw TCP streams into HTTP/1.1 CONNECT requests. See :ref:`upgrade documentation ` for details. +* tcp_proxy: added a :ref:`use_post field ` for using HTTP POST to proxy TCP streams. +* tcp_proxy: added a :ref:`headers_to_add field ` for setting additional headers to the HTTP requests for TCP proxing. +* thrift_proxy: added a :ref:`max_requests_per_connection field ` for setting maximum requests for per downstream connection. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for messagetype counters in request/response. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request time histograms. +* tls peer certificate validation: added :ref:`SPIFFE validator ` for supporting isolated multiple trust bundles in a single listener or cluster. 
+* tracing: added the :ref:`pack_trace_reason ` + field as well as explicit configuration for the built-in :ref:`UuidRequestIdConfig ` request ID implementation. See the trace context propagation :ref:`architecture overview - ` for more information. -* udp: added :ref:`downstream ` and - :ref:`upstream ` statistics for dropped datagrams. -* udp: added :ref:`downstream_socket_config ` + ` for more information. +* udp: added :ref:`downstream ` and + :ref:`upstream ` statistics for dropped datagrams. +* udp: added :ref:`downstream_socket_config ` listener configuration to allow configuration of downstream max UDP datagram size. Also added - :ref:`upstream_socket_config ` + :ref:`upstream_socket_config ` UDP proxy configuration to allow configuration of upstream max UDP datagram size. The defaults for both remain 1500 bytes. * udp: added configuration for :ref:`GRO - `. The default is disabled for - :ref:`downstream sockets ` - and enabled for :ref:`upstream sockets `. + `. The default is disabled for + :ref:`downstream sockets ` + and enabled for :ref:`upstream sockets `. Deprecated ---------- -* admin: :ref:`access_log_path ` is deprecated in favor for :ref:`access loggers `. +* admin: :ref:`access_log_path ` is deprecated in favor for :ref:`access loggers `. diff --git a/docs/root/version_history/v1.18.3.rst b/docs/root/version_history/v1.18.3.rst index 509d0ebc7cf0a..99650a6db06f2 100644 --- a/docs/root/version_history/v1.18.3.rst +++ b/docs/root/version_history/v1.18.3.rst @@ -17,14 +17,14 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * tls: removed `envoy.reloadable_features.tls_use_io_handle_bio` runtime guard and legacy code path. New Features ------------ -* http: added the ability to :ref:`unescape slash sequences ` in the path. 
Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. Deprecated ---------- diff --git a/docs/root/version_history/v1.18.4.rst b/docs/root/version_history/v1.18.4.rst index 9e66511ff8070..5f107c7f96aaa 100644 --- a/docs/root/version_history/v1.18.4.rst +++ b/docs/root/version_history/v1.18.4.rst @@ -9,7 +9,7 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* http: disable the integration between :ref:`ExtensionWithMatcher ` +* http: disable the integration between :ref:`ExtensionWithMatcher ` and HTTP filters by default to reflects its experimental status. This feature can be enabled by seting `envoy.reloadable_features.experimental_matching_api` to true. * http: reject requests with #fragment in the URI path. 
The fragment is not allowed to be part of request @@ -31,7 +31,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.19.0.rst b/docs/root/version_history/v1.19.0.rst index 581a535a17df6..16f8c8b86068a 100644 --- a/docs/root/version_history/v1.19.0.rst +++ b/docs/root/version_history/v1.19.0.rst @@ -15,7 +15,7 @@ Minor Behavior Changes * access_log: added new access_log command operator ``%REQUEST_TX_DURATION%``. * access_log: removed extra quotes on metadata string values. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.unquote_log_string_values`` to false. -* admission control: added :ref:`max_rejection_probability ` which defaults to 80%, which means that the upper limit of the default rejection probability of the filter is changed from 100% to 80%. +* admission control: added :ref:`max_rejection_probability ` which defaults to 80%, which means that the upper limit of the default rejection probability of the filter is changed from 100% to 80%. * aws_request_signing: requests are now buffered by default to compute signatures which include the payload hash, making the filter compatible with most AWS services. Previously, requests were never buffered, which only produced correct signatures for requests without a body, or for @@ -23,10 +23,10 @@ Minor Behavior Changes be now be disabled in favor of using unsigned payloads with compatible services via the new ``use_unsigned_payload`` filter option (default false). * cache filter: serve HEAD requests from cache. -* cluster: added default value of 5 seconds for :ref:`connect_timeout `. +* cluster: added default value of 5 seconds for :ref:`connect_timeout `. * dns: changed apple resolver implementation to not reuse the UDS to the local DNS daemon. 
-* dns cache: the new :ref:`dns_query_timeout ` option has a default of 5s. See below for more information. -* http: disable the integration between :ref:`ExtensionWithMatcher ` +* dns cache: the new :ref:`dns_query_timeout ` option has a default of 5s. See below for more information. +* http: disable the integration between :ref:`ExtensionWithMatcher ` and HTTP filters by default to reflect its experimental status. This feature can be enabled by setting ``envoy.reloadable_features.experimental_matching_api`` to true. * http: replaced setting ``envoy.reloadable_features.strict_1xx_and_204_response_headers`` with settings @@ -38,7 +38,7 @@ Minor Behavior Changes ``envoy.reloadable_features.no_chunked_encoding_header_for_304`` to false. * http: the behavior of the ``present_match`` in route header matcher changed. The value of ``present_match`` was ignored in the past. The new behavior is ``present_match`` is performed when the value is true. An absent match performed when the value is false. Please reference :ref:`present_match `. -* listener: respect the :ref:`connection balance config ` +* listener: respect the :ref:`connection balance config ` defined within the listener where the sockets are redirected to. Clear that field to restore the previous behavior. * listener: when balancing across active listeners and wildcard matching is used, the behavior has been changed to return the listener that matches the IP family type associated with the listener's socket address. Any unexpected behavioral changes can be reverted by setting runtime guard ``envoy.reloadable_features.listener_wildcard_match_ip_family`` to false. * tcp: switched to the new connection pool by default. Any unexpected behavioral changes can be reverted by setting runtime guard ``envoy.reloadable_features.new_tcp_connection_pool`` to false. 
@@ -49,7 +49,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * aws_lambda: if ``payload_passthrough`` is set to ``false``, the downstream response content-type header will now be set from the content-type entry in the JSON response's headers map, if present. -* cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router +* cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router filter. This means that we now properly compute the number of bytes sent as well as handling retries which were previously ignored. * hot_restart: fix double counting of ``server.seconds_until_first_ocsp_response_expiring`` and ``server.days_until_first_cert_expiring`` during hot-restart. This stat was only incorrect until the parent process terminated. * http: fix erroneous handling of invalid nghttp2 frames with the ``NGHTTP2_ERR_REFUSED_STREAM`` error. Prior to the fix, @@ -65,15 +65,15 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * event: removed ``envoy.reloadable_features.activate_timers_next_event_loop`` runtime guard and legacy code path. * gzip: removed legacy HTTP Gzip filter and runtime guard ``envoy.deprecated_features.allow_deprecated_gzip_http_filter``. * http: removed ``envoy.reloadable_features.allow_500_after_100`` runtime guard and the legacy code path. * http: removed ``envoy.reloadable_features.always_apply_route_header_rules`` runtime guard and legacy code path. -* http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. 
+* http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. * http: removed ``envoy.reloadable_features.http_set_copy_replace_all_headers`` runtime guard and legacy code paths. -* http: removed ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2``; Envoy will now always send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. +* http: removed ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2``; Envoy will now always send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. * http: removed ``envoy.reloadable_features.http_match_on_all_headers`` runtime guard and legacy code paths. * http: removed ``envoy.reloadable_features.unify_grpc_handling`` runtime guard and legacy code paths. * tls: removed ``envoy.reloadable_features.tls_use_io_handle_bio`` runtime guard and legacy code path. @@ -81,62 +81,62 @@ Removed Config or Runtime New Features ------------ -* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. -* admission control: added :ref:`rps_threshold ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`max_rejection_probability ` option to set an upper limit on the probability of rejection. -* bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. -* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. 
By setting the ``resolvers`` the external DNS servers to be used for external DNS queries can be specified. -* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. -* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. -* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. +* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. +* admission control: added :ref:`rps_threshold ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`max_rejection_probability ` option to set an upper limit on the probability of rejection. +* bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. +* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. By setting the ``resolvers`` the external DNS servers to be used for external DNS queries can be specified. +* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. +* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. +* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. 
* composite filter: can now be used with filters that also add an access logger, such as the WASM filter. -* config: added stat :ref:`config_reload_time_ms `. -* connection_limit: added new :ref:`Network connection limit filter `. +* config: added stat :ref:`config_reload_time_ms `. +* connection_limit: added new :ref:`Network connection limit filter `. * crash support: restore crash context when continuing to processing requests or responses as a result of an asynchronous callback that invokes a filter directly. This is unlike the call stacks that go through the various network layers, to eventually reach the filter. For a concrete example see: ``Envoy::Extensions::HttpFilters::Cache::CacheFilter::getHeaders`` which posts a callback on the dispatcher that will invoke the filter directly. -* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. -* dns cache: added :ref:`dns_query_timeout ` option to the DNS cache config. This option allows explicitly controlling the timeout of underlying queries independently of the underlying DNS platform implementation. Coupled with success and failure retry policies the use of this timeout will lead to more deterministic DNS resolution times. -* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. 
-* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. -* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. -* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. -* ext_authz_filter: added :ref:`bootstrap_metadata_labels_key ` option to configure labels of destination service. +* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. +* dns cache: added :ref:`dns_query_timeout ` option to the DNS cache config. 
This option allows explicitly controlling the timeout of underlying queries independently of the underlying DNS platform implementation. Coupled with success and failure retry policies the use of this timeout will lead to more deterministic DNS resolution times. +* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. +* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. +* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. +* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. 
And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. +* ext_authz_filter: added :ref:`bootstrap_metadata_labels_key ` option to configure labels of destination service. * http: added new field ``is_optional`` to ``extensions.filters.network.http_connection_manager.v3.HttpFilter``. When set to ``true``, unsupported http filters will be ignored by envoy. This is also same with unsupported http filter in the typed per filter config. For more information, please reference - :ref:`HttpFilter `. -* http: added :ref:`scheme options ` for adding or overwriting scheme. -* http: added :ref:`stripping trailing host dot from host header ` support. -* http: added support for :ref:`original IP detection extensions `. - Two initial extensions were added, the :ref:`custom header ` extension and the - :ref:`xff ` extension. -* http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. -* http: added upstream and downstream alpha HTTP/3 support! See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. + :ref:`HttpFilter `. +* http: added :ref:`scheme options ` for adding or overwriting scheme. +* http: added :ref:`stripping trailing host dot from host header ` support. 
+* http: added support for :ref:`original IP detection extensions `. + Two initial extensions were added, the :ref:`custom header ` extension and the + :ref:`xff ` extension. +* http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. +* http: added upstream and downstream alpha HTTP/3 support! See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. * http: raise max configurable max_request_headers_kb limit to 8192 KiB (8MiB) from 96 KiB in http connection manager. -* input matcher: added a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. -* jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. -* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. +* input matcher: added a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. +* jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. +* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. * listener: added ability to change an existing listener's address. -* listener: added filter chain match support for :ref:`direct source address `. 
-* local_rate_limit_filter: added suppoort for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. -* metric service: added support for sending metric tags as labels. This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. -* proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. +* listener: added filter chain match support for :ref:`direct source address `. +* local_rate_limit_filter: added support for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. +* metric service: added support for sending metric tags as labels. This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. +* proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. This feature is currently affected by a memory leak `issue `_. * req_without_query: added access log formatter extension implementing command operator :ref:`REQ_WITHOUT_QUERY ` to log the request path, while excluding the query string. * router: added option ``suppress_grpc_request_failure_code_stats`` to :ref:`the router ` to allow users to exclude incrementing HTTP status code stats on gRPC requests. -* stats: added native :ref:`Graphite-formatted tag ` support. -* tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. -* thrift_proxy: added support for :ref:`outlier detection `. 
+* stats: added native :ref:`Graphite-formatted tag ` support. +* tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. +* thrift_proxy: added support for :ref:`outlier detection `. * tls: allow dual ECDSA/RSA certs via SDS. Previously, SDS only supported a single certificate per context, and dual cert was only supported via non-SDS. -* tracing: add option :ref:`use_request_id_for_trace_sampling ` which allows configuring whether to perform sampling based on :ref:`x-request-id` or not. -* udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. +* tracing: add option :ref:`use_request_id_for_trace_sampling ` which allows configuring whether to perform sampling based on :ref:`x-request-id` or not. +* udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. * windows container image: added user, EnvoyUser which is part of the Network Configuration Operators group to the container image. Deprecated ---------- -* bootstrap: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* cluster: the fields :ref:`use_tcp_for_dns_lookups ` and :ref:`dns_resolvers ` are deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* dns_filter: the field :ref:`known_suffixes ` is deprecated. The internal data management of the filter has changed and the filter no longer uses the known_suffixes field. -* dynamic_forward_proxy: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. 
-* http: :ref:`xff_num_trusted_hops ` is deprecated in favor of :ref:`original IP detection extensions`. +* bootstrap: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* cluster: the fields :ref:`use_tcp_for_dns_lookups ` and :ref:`dns_resolvers ` are deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* dns_filter: the field :ref:`known_suffixes ` is deprecated. The internal data management of the filter has changed and the filter no longer uses the known_suffixes field. +* dynamic_forward_proxy: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* http: :ref:`xff_num_trusted_hops ` is deprecated in favor of :ref:`original IP detection extensions`. diff --git a/docs/root/version_history/v1.2.0.rst b/docs/root/version_history/v1.2.0.rst index eb54093f69063..ca42c5df855be 100644 --- a/docs/root/version_history/v1.2.0.rst +++ b/docs/root/version_history/v1.2.0.rst @@ -4,27 +4,27 @@ Changes ------- -* :ref:`Cluster discovery service (CDS) API `. -* :ref:`Outlier detection ` (passive health checking). +* :ref:`Cluster discovery service (CDS) API `. +* :ref:`Outlier detection ` (passive health checking). * Envoy configuration is now checked against a JSON schema. -* :ref:`Ring hash ` consistent load balancer, as well as HTTP +* :ref:`Ring hash ` consistent load balancer, as well as HTTP consistent hash routing based on a policy. -* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP +* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP rate limiting filter. * HTTP routing to a cluster retrieved from a header. * Weighted cluster HTTP routing. * Auto host rewrite during HTTP routing. 
* Regex header matching during HTTP routing. * HTTP access log runtime filter. -* LightStep tracer :ref:`parent/child span association `. -* :ref:`Route discovery service (RDS) API `. +* LightStep tracer :ref:`parent/child span association `. +* :ref:`Route discovery service (RDS) API `. * HTTP router :ref:`x-envoy-upstream-rq-timeout-alt-response header - ` support. -* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for + ` support. +* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for iptables based transparent proxy support). -* TCP proxy filter :ref:`route table support `. +* TCP proxy filter :ref:`route table support `. * Configurable stats flush interval. -* Various :ref:`third party library upgrades `, including using BoringSSL as +* Various :ref:`third party library upgrades `, including using BoringSSL as the default SSL provider. * No longer maintain closed HTTP/2 streams for priority calculations. Leads to substantial memory savings for large meshes. 
diff --git a/docs/root/version_history/v1.20.0.rst b/docs/root/version_history/v1.20.0.rst new file mode 100644 index 0000000000000..1378e06c09abc --- /dev/null +++ b/docs/root/version_history/v1.20.0.rst @@ -0,0 +1,189 @@ +1.20.0 (October 5, 2021) +======================== + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* config: due to the switch to using work-in-progress annotations and warnings to indicate APIs + subject to change, the following API packages have been force migrated from ``v3alpha`` to ``v3``: + ``envoy.extensions.access_loggers.open_telemetry.v3``, + ``envoy.extensions.cache.simple_http_cache.v3``, + ``envoy.extensions.filters.http.admission_control.v3``, + ``envoy.extensions.filters.http.bandwidth_limit.v3``, + ``envoy.extensions.filters.http.cache.v3``, + ``envoy.extensions.filters.http.cdn_loop.v3``, + ``envoy.extensions.filters.http.ext_proc.v3``, + ``envoy.extensions.filters.http.oauth2.v3``, + ``envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3``, + ``envoy.extensions.filters.udp.dns_filter.v3``, + ``envoy.extensions.transport_sockets.s2a.v3``, + ``envoy.extensions.watchdog.profile_action.v3``, + ``envoy.service.ext_proc.v3``, and + ``envoy.watchdog.v3``. If your production deployment was using one of these APIs, you will be + forced to potentially vendor the old proto file to continue serving old versions of Envoy. + The project realizes this is unfortunate because some of these are known to be used in production, + however the project does not have the resources to undergo a migration in which we support + ``v3alpha`` and ``v3`` at the same time. The switch to using work-in-progress annotations with + clear and explicit warnings will avoid any such issue in the future. We apologize again for any + difficulty this change causes, though it is for the best. 
Additionally, some of the above + namespaces have had their work-in-progress annotations removed due to known production usage. + Thus, they will not warn and are offered full API stability support by the project from this + point forward. +* config: the ``--bootstrap-version`` CLI flag has been removed, Envoy has only been able to accept v3 + bootstrap configurations since 1.18.0. +* contrib: the :ref:`squash filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`kafka broker filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`RocketMQ proxy filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`Postgres proxy filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`MySQL proxy filter ` has been moved to + :ref:`contrib images `. +* dns_filter: :ref:`dns_filter ` + protobuf fields have been renumbered to restore compatibility with Envoy + 1.18, breaking compatibility with Envoy 1.19.0 and 1.19.1. The new field + numbering allows control planes supporting Envoy 1.18 to gracefully upgrade to + :ref:`dns_resolution_config `, + provided they skip over Envoy 1.19.0 and 1.19.1. + Control planes upgrading from Envoy 1.19.0 and 1.19.1 will need to + vendor the corresponding protobuf definitions to ensure that the + renumbered fields have the types expected by those releases. +* extensions: deprecated extension names now default to triggering a configuration error. + The previous warning-only behavior may be temporarily reverted by setting the runtime key + ``envoy.deprecated_features.allow_deprecated_extension_names`` to true. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* client_ssl_auth filter: now sets additional termination details and ``UAEX`` response flag when the client certificate is not in the allowed-list. +* config: configuration files ending in .yml now load as YAML. 
+* config: configuration file extensions now ignore case when deciding the file type. E.g., .JSON files load as JSON. +* config: reduced log level for "Unable to establish new stream" xDS logs to debug. The log level + for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been + retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 + seconds. +* config: use of work-in-progress API files, messages, or fields will now generate an explicit + warning. Please read the text about ``(xds.annotations.v3.file_status).work_in_progress``, + ``(xds.annotations.v3.message_status).work_in_progress``, and + ``(xds.annotations.v3.field_status).work_in_progress`` + `here `_ for more information. Some + APIs that are known to be implicitly not work-in-progress have been force migrated and are + individually indicated elsewhere in the release notes. A server-wide ``wip_protos`` counter has + also been added in :ref:`server statistics ` to track this. +* ext_authz: fixed skipping authentication when returning either a direct response or a redirect. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect`` runtime guard to false. +* grpc: gRPC async client can be cached and shared across filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. +* http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ + (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior + can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. +* http: reject requests with #fragment in the URI path. 
The fragment is not allowed to be part of the request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled``. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. +* http: set the default :ref:`lazy headermap threshold ` to 3, + which defines the minimal number of headers in a request/response/trailers required for using a + dictionary in addition to the list. Setting the ``envoy.http.headermap.lazy_map_min_size`` runtime + feature to a non-negative number will override the default value. +* http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. +* listener: added the :ref:`enable_reuse_port ` + field and changed the default for ``reuse_port`` from false to true, as the feature is now well + supported on the majority of production Linux kernels in use. The default change is aware of the hot + restart, as otherwise, the change would not be backward compatible between restarts. This means + that hot restarting onto a new binary will retain the default of false until the binary undergoes + a full restart. 
To retain the previous behavior, either explicitly set the new configuration + field to false, or set the runtime feature flag ``envoy.reloadable_features.listener_reuse_port_default_enabled`` + to false. As part of this change, the use of ``reuse_port`` for TCP listeners on both macOS and + Windows has been disabled due to suboptimal behavior. See the field documentation for more + information. +* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in-place update. +* quic: enables IETF connection migration. This feature requires a stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. +* thrift_proxy: allow Framed and Header transport combinations to perform :ref:`payload passthrough `. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. +* aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentation `_. +* cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false. +* cluster: finish cluster warming even if hosts are removed before health check initialization. This only affected clusters with :ref:`ignore_health_on_host_removal `. +* compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. 
This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. +* dynamic forward proxy: fixing a validation bug where san and sni checks were not applied setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* ext_authz: fix the use of ``append`` field of :ref:`response_headers_to_add ` to set or append encoded response headers from a gRPC auth server. +* ext_authz: fix the HTTP ext_authz filter to respond with ``403 Forbidden`` when a gRPC auth server sends a denied check response with an empty HTTP status code. +* ext_authz: the network ext_authz filter now correctly sets dynamic metadata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. +* hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. +* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. +* listener: fixed an issue on Windows where connections are not handled by all worker threads. +* lua: fix ``BodyBuffer`` setting a Lua string and printing Lua string containing hex characters. Previously, ``BodyBuffer`` setting a Lua string or printing strings with hex characters would be truncated. +* xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under the 'annotations' section of the segment data. 
+ +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: removed ``envoy.reloadable_features.http_upstream_wait_connect_response`` runtime guard and legacy code paths. +* http: removed ``envoy.reloadable_features.allow_preconnect`` runtime guard and legacy code paths. +* listener: removed ``envoy.reloadable_features.disable_tls_inspector_injection`` runtime guard and legacy code paths. +* ocsp: removed ``envoy.reloadable_features.check_ocsp_policy deprecation`` runtime guard and legacy code paths. +* ocsp: removed ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs deprecation`` and legacy code paths. +* quic: removed ``envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing`` runtime guard. + +New Features +------------ +* access_log: added :ref:`METADATA` token to handle all types of metadata (DYNAMIC, CLUSTER, ROUTE). +* bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. +* contrib: added new :ref:`contrib images ` which contain contrib extensions. +* dns: added :ref:`V4_PREFERRED ` option to return V6 addresses only if V4 addresses are not available. +* ext_authz: added :ref:`dynamic_metadata_from_headers ` to support emitting dynamic metadata from headers returned by an external authorization service via HTTP. +* grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. +* grpc_json_transcoder: added support to unescape '+' in query parameters to space with a new config field :ref:`query_param_unescape_plus `. +* http: added cluster_header in :ref:`weighted_clusters ` to allow routing to the weighted cluster specified in the request_header. 
+* http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_ and caching the advertisements to disk. +* http: added :ref:`string_match ` in the header matcher. +* http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. +* http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. +* http: sanitizing the referer header as documented :ref:`here `. This feature can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.sanitize_http_header_referer`` to false. +* http: validating outgoing HTTP/2 CONNECT requests to ensure that if ``:path`` is set that ``:protocol`` is present. This behavior can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.validate_connect`` to false. +* jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. +* jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. +* jwt_authn: added support for setting the extracted headers from a successfully verified JWT using :ref:`header_in_metadata ` to dynamic metadata. +* listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. +* lua: added ``header:getAtIndex()`` and ``header:getNumValues()`` methods to :ref:`header object ` for retrieving the value of a header at certain index and get the total number of values for a given header. +* matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. +* overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via :ref:`buffer_factory_config `. 
 We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. +* rbac: added :ref:`destination_port_range ` for matching range of destination ports. +* rbac: added :ref:`matcher` along with extension category ``extension_category_envoy.rbac.matchers`` for custom RBAC permission matchers. Added reference implementation for matchers :ref:`envoy.rbac.matchers.upstream_ip_port `. +* route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. +* router: added retry options predicate extensions configured via :ref:`retry_options_predicates. ` These extensions allow modification of requests between retries at the router level. There are not currently any built-in extensions that implement this extension point. +* router: added :ref:`per_try_idle_timeout ` timeout configuration. +* router: added an optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. +* sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. +* thrift_proxy: added support for :ref:`mirroring requests `. +* udp: allows updating filter chain in-place through LDS, which is supported by Quic listener. Such listener config will be rejected in other connection-less UDP listener implementations. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. +* udp: disallow L4 filter chain in config which configures connection-less UDP listener. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. +* upstream: added support for :ref:`slow start mode `, which allows to progressively increase traffic for new endpoints. +* upstream: extended :ref:`Round Robin load balancer configuration ` with :ref:`slow start ` support. 
+* upstream: extended :ref:`Least Request load balancer configuration ` with :ref:`slow start ` support. +* windows: added a new container image based on Windows Nanoserver 2022. +* xray: request direction (``ingress`` or ``egress``) is recorded as X-Ray trace segment's annotation by name ``direction``. + +Deprecated +---------- + +* api: the :ref:`matcher ` field has been deprecated in favor of + :ref:`matcher ` in order to break a build dependency. +* cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. +* http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, + :ref:`prefix_match `, :ref:`suffix_match ` and + :ref:`contains_match ` are deprecated by :ref:`string_match `. +* listener: :ref:`reuse_port ` has been + deprecated in favor of :ref:`enable_reuse_port `. + At the same time, the default has been changed from false to true. See above for more information. diff --git a/docs/root/version_history/v1.3.0.rst b/docs/root/version_history/v1.3.0.rst index 234be4dbacad3..969dfa127f4a8 100644 --- a/docs/root/version_history/v1.3.0.rst +++ b/docs/root/version_history/v1.3.0.rst @@ -5,21 +5,21 @@ Changes ------- * As of this release, we now have an official :repo:`breaking change policy - `. Note that there are numerous breaking configuration + `. Note that there are numerous breaking configuration changes in this release. They are not listed here. Future releases will adhere to the policy and have clear documentation on deprecations and changes. * Bazel is now the canonical build system (replacing CMake). There have been a huge number of changes to the development/build/test flow. See :repo:`/bazel/README.md` and :repo:`/ci/README.md` for more information. -* :ref:`Outlier detection ` has been expanded to include success +* :ref:`Outlier detection ` has been expanded to include success rate variance, and all parameters are now configurable in both runtime and in the JSON configuration. 
* TCP level listener and cluster connections now have configurable receive buffer limits at which point connection level back pressure is applied. Full end to end flow control will be available in a future release. -* :ref:`Redis health checking ` has been added as an active +* :ref:`Redis health checking ` has been added as an active health check type. Full Redis support will be documented/supported in 1.4.0. -* :ref:`TCP health checking ` now supports a +* :ref:`TCP health checking ` now supports a "connect only" mode that only checks if the remote server can be connected to without writing/reading any data. * `BoringSSL `_ is now the only supported TLS provider. @@ -31,36 +31,36 @@ Changes configurations by default. Use ``include_vh_rate_limits`` to inherit the virtual host level options if desired. * HTTP routes can now add request headers on a per route and per virtual host basis via the - :ref:`request_headers_to_add ` option. -* The :ref:`example configurations ` have been refreshed to demonstrate the + :ref:`request_headers_to_add ` option. +* The :ref:`example configurations ` have been refreshed to demonstrate the latest features. * ``per_try_timeout_ms`` can now be configured in a route's retry policy in addition to via the :ref:`x-envoy-upstream-rq-per-try-timeout-ms - ` HTTP header. + ` HTTP header. * HTTP virtual host matching now includes support for prefix wildcard domains (e.g., ``*.lyft.com``). * The default for tracing random sampling has been changed to 100% and is still configurable in - :ref:`runtime `. + :ref:`runtime `. * HTTP tracing configuration has been extended to allow tags to be populated from arbitrary HTTP headers. -* The :ref:`HTTP rate limit filter ` can now be applied to internal, +* The :ref:`HTTP rate limit filter ` can now be applied to internal, external, or all requests via the ``request_type`` option. -* :ref:`Listener binding ` now requires specifying an `address` field. 
This can be +* :ref:`Listener binding ` now requires specifying an `address` field. This can be used to bind a listener to both a specific address as well as a port. -* The :ref:`MongoDB filter ` now emits a stat for queries that +* The :ref:`MongoDB filter ` now emits a stat for queries that do not have ``$maxTimeMS`` set. -* The :ref:`MongoDB filter ` now emits logs that are fully valid +* The :ref:`MongoDB filter ` now emits logs that are fully valid JSON. * The CPU profiler output path is now configurable. * A watchdog system has been added that can kill the server if a deadlock is detected. -* A :ref:`route table checking tool ` has been added that can +* A :ref:`route table checking tool ` has been added that can be used to test route tables before use. -* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. +* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. * Added additional cluster wide information related to outlier detection to the :ref:`/clusters - admin endpoint `. + admin endpoint `. * Multiple SANs can now be verified via the ``verify_subject_alt_name`` setting. Additionally, URI type SANs can be verified. * HTTP filters can now be passed opaque configuration specified on a per route basis. * By default Envoy now has a built in crash handler that will print a back trace. This behavior can be disabled if desired via the ``--define=signal_trace=disabled`` Bazel option. -* Zipkin has been added as a supported :ref:`tracing provider `. +* Zipkin has been added as a supported :ref:`tracing provider `. * Numerous small changes and fixes not listed here. diff --git a/docs/root/version_history/v1.4.0.rst b/docs/root/version_history/v1.4.0.rst index 3342e4faee549..f736fbdf6a433 100644 --- a/docs/root/version_history/v1.4.0.rst +++ b/docs/root/version_history/v1.4.0.rst @@ -4,42 +4,42 @@ Changes ------- -* macOS is :repo:`now supported `. (A few features +* macOS is :repo:`now supported `. 
(A few features are missing such as hot restart and original destination routing). * YAML is now directly supported for config files. * Added /routes admin endpoint. * End-to-end flow control is now supported for TCP proxy, HTTP/1, and HTTP/2. HTTP flow control that includes filter buffering is incomplete and will be implemented in 1.5.0. -* Log verbosity :repo:`compile time flag ` added. -* Hot restart :repo:`compile time flag ` added. -* Original destination :ref:`cluster ` - and :ref:`load balancer ` added. -* :ref:`WebSocket ` is now supported. +* Log verbosity :repo:`compile time flag ` added. +* Hot restart :repo:`compile time flag ` added. +* Original destination :ref:`cluster ` + and :ref:`load balancer ` added. +* :ref:`WebSocket ` is now supported. * Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure no one is using this feature. * Route ``validate_clusters`` option added. -* :ref:`x-envoy-downstream-service-node ` +* :ref:`x-envoy-downstream-service-node ` header added. -* :ref:`x-forwarded-client-cert ` header +* :ref:`x-forwarded-client-cert ` header added. * Initial HTTP/1 forward proxy support for absolute URLs has been added. * HTTP/2 codec settings are now configurable. -* gRPC/JSON transcoder :ref:`filter ` added. -* gRPC web :ref:`filter ` added. +* gRPC/JSON transcoder :ref:`filter ` added. +* gRPC web :ref:`filter ` added. * Configurable timeout for the rate limit service call in the :ref:`network - ` and :ref:`HTTP ` rate limit + ` and :ref:`HTTP ` rate limit filters. -* :ref:`x-envoy-retry-grpc-on ` header added. -* :ref:`LDS API ` added. +* :ref:`x-envoy-retry-grpc-on ` header added. +* :ref:`LDS API ` added. * TLS :``require_client_certificate`` option added. -* :ref:`Configuration check tool ` added. -* :ref:`JSON schema check tool ` added. +* :ref:`Configuration check tool ` added. +* :ref:`JSON schema check tool ` added. * Config validation mode added via the :option:`--mode` option. 
* :option:`--local-address-ip-version` option added. * IPv6 support is now complete. * UDP ``statsd_ip_address`` option added. * Per-cluster DNS resolvers added. -* :ref:`Fault filter ` enhancements and fixes. +* :ref:`Fault filter ` enhancements and fixes. * Several features are `deprecated as of the 1.4.0 release `_. They will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the ``HttpFilterConfigFactory`` filter API has been deprecated in favor of diff --git a/docs/root/version_history/v1.5.0.rst b/docs/root/version_history/v1.5.0.rst index 58e1d3147f7b9..6986e34b84e72 100644 --- a/docs/root/version_history/v1.5.0.rst +++ b/docs/root/version_history/v1.5.0.rst @@ -5,66 +5,66 @@ Changes ------- * access log: added fields for :ref:`UPSTREAM_LOCAL_ADDRESS and DOWNSTREAM_ADDRESS - `. -* admin: added :ref:`JSON output ` for stats admin endpoint. -* admin: added basic :ref:`Prometheus output ` for stats admin + `. +* admin: added :ref:`JSON output ` for stats admin endpoint. +* admin: added basic :ref:`Prometheus output ` for stats admin endpoint. Histograms are not currently output. -* admin: added ``version_info`` to the :ref:`/clusters admin endpoint `. -* config: the :ref:`v2 API ` is now considered production ready. +* admin: added ``version_info`` to the :ref:`/clusters admin endpoint `. +* config: the :ref:`v2 API ` is now considered production ready. * config: added --v2-config-only CLI flag. -* cors: added :ref:`CORS filter `. +* cors: added :ref:`CORS filter `. * health check: added :ref:`x-envoy-immediate-health-check-fail - ` header support. -* health check: added :ref:`reuse_connection ` option. -* http: added :ref:`per-listener stats `. + ` header support. +* health check: added :ref:`reuse_connection ` option. +* http: added :ref:`per-listener stats `. * http: end-to-end HTTP flow control is now complete across both connections, streams, and filters. -* load balancer: added :ref:`subset load balancer `. 
+* load balancer: added :ref:`subset load balancer `. * load balancer: added ring size and hash :ref:`configuration options - `. This used to be configurable via runtime. The runtime + `. This used to be configurable via runtime. The runtime configuration was deleted without deprecation as we are fairly certain no one is using it. * log: added the ability to optionally log to a file instead of stderr via the :option:`--log-path` option. -* listeners: added :ref:`drain_type ` option. -* lua: added experimental :ref:`Lua filter `. -* mongo filter: added :ref:`fault injection `. -* mongo filter: added :ref:`"drain close" ` support. -* outlier detection: added :ref:`HTTP gateway failure type `. +* listeners: added :ref:`drain_type ` option. +* lua: added experimental :ref:`Lua filter `. +* mongo filter: added :ref:`fault injection `. +* mongo filter: added :ref:`"drain close" ` support. +* outlier detection: added :ref:`HTTP gateway failure type `. See `deprecated log `_ for outlier detection stats deprecations in this release. -* redis: the :ref:`redis proxy filter ` is now considered +* redis: the :ref:`redis proxy filter ` is now considered production ready. -* redis: added :ref:`"drain close" ` functionality. -* router: added :ref:`x-envoy-overloaded ` support. -* router: added :ref:`regex ` route matching. -* router: added :ref:`custom request headers ` +* redis: added :ref:`"drain close" ` functionality. +* router: added :ref:`x-envoy-overloaded ` support. +* router: added :ref:`regex ` route matching. +* router: added :ref:`custom request headers ` for upstream requests. * router: added :ref:`downstream IP hashing - ` for HTTP ketama routing. -* router: added :ref:`cookie hashing `. -* router: added :ref:`start_child_span ` option + ` for HTTP ketama routing. +* router: added :ref:`cookie hashing `. +* router: added :ref:`start_child_span ` option to create child span for egress calls. -* router: added optional :ref:`upstream logs `. 
+* router: added optional :ref:`upstream logs `. * router: added complete :ref:`custom append/override/remove support - ` of request/response headers. + ` of request/response headers. * router: added support to :ref:`specify response code during redirect - `. -* router: added :ref:`configuration ` + `. +* router: added :ref:`configuration ` to return either a 404 or 503 if the upstream cluster does not exist. -* runtime: added :ref:`comment capability `. +* runtime: added :ref:`comment capability `. * server: change default log level (:option:`-l`) to ``info``. * stats: maximum stat/name sizes and maximum number of stats are now variable via the ``--max-obj-name-len`` and ``--max-stats`` options. -* tcp proxy: added :ref:`access logging `. +* tcp proxy: added :ref:`access logging `. * tcp proxy: added :ref:`configurable connect retries - `. -* tcp proxy: enable use of :ref:`outlier detector `. -* tls: added :ref:`SNI support `. + `. +* tcp proxy: enable use of :ref:`outlier detector `. +* tls: added :ref:`SNI support `. * tls: added support for specifying :ref:`TLS session ticket keys - `. + `. * tls: allow configuration of the :ref:`min - ` and :ref:`max - ` TLS protocol versions. -* tracing: added :ref:`custom trace span decorators `. + ` and :ref:`max + ` TLS protocol versions. +* tracing: added :ref:`custom trace span decorators `. * Many small bug fixes and performance improvements not listed. Deprecated diff --git a/docs/root/version_history/v1.6.0.rst b/docs/root/version_history/v1.6.0.rst index 406ca33b4da9e..20d525f0ba6d0 100644 --- a/docs/root/version_history/v1.6.0.rst +++ b/docs/root/version_history/v1.6.0.rst @@ -5,121 +5,121 @@ Changes ------- * access log: added DOWNSTREAM_REMOTE_ADDRESS, DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, and - DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. + DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. DOWNSTREAM_ADDRESS access log formatter has been deprecated. 
* access log: added less than or equal (LE) :ref:`comparison filter - `. + `. * access log: added configuration to :ref:`runtime filter - ` to set default sampling rate, divisor, + ` to set default sampling rate, divisor, and whether to use independent randomness or not. -* admin: added :ref:`/runtime ` admin endpoint to read the +* admin: added :ref:`/runtime ` admin endpoint to read the current runtime values. * build: added support for :repo:`building Envoy with exported symbols - `. This change allows scripts loaded with the Lua filter to + `. This change allows scripts loaded with the Lua filter to load shared object libraries such as those installed via `LuaRocks `_. * config: added support for sending error details as `grpc.rpc.Status `_ - in :ref:`DiscoveryRequest `. -* config: added support for :ref:`inline delivery ` of TLS + in :ref:`DiscoveryRequest `. +* config: added support for :ref:`inline delivery ` of TLS certificates and private keys. -* config: added restrictions for the backing :ref:`config sources ` +* config: added restrictions for the backing :ref:`config sources ` of xDS resources. For filesystem based xDS the file must exist at configuration time. For cluster based xDS the backing cluster must be statically defined and be of non-EDS type. * grpc: the Google gRPC C++ library client is now supported as specified in the :ref:`gRPC services - overview ` and :ref:`GrpcService `. + overview ` and :ref:`GrpcService `. * grpc-json: added support for :ref:`inline descriptors - `. -* health check: added :ref:`gRPC health check ` + `. +* health check: added :ref:`gRPC health check ` based on `grpc.health.v1.Health `_ service. * health check: added ability to set :ref:`host header value - ` for http health check. + ` for http health check. * health check: extended the health check filter to support computation of the health check response based on the :ref:`percentage of healthy servers in upstream clusters - `. + `. 
* health check: added setting for :ref:`no-traffic - interval `. + interval `. * http: added idle timeout for :ref:`upstream http connections - `. + `. * http: added support for :ref:`proxying 100-Continue responses - `. + `. * http: added the ability to pass a URL encoded PEM encoded peer certificate in the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. * http: added support for trusting additional hops in the :ref:`config_http_conn_man_headers_x-forwarded-for` request header. * http: added support for :ref:`incoming HTTP/1.0 - `. + `. * hot restart: added SIGTERM propagation to children to :ref:`hot-restarter.py - `, which enables using it as a parent of containers. -* ip tagging: added :ref:`HTTP IP Tagging filter `. + `, which enables using it as a parent of containers. +* ip tagging: added :ref:`HTTP IP Tagging filter `. * listeners: added support for :ref:`listening for both IPv4 and IPv6 - ` when binding to ::. + ` when binding to ::. * listeners: added support for listening on :ref:`UNIX domain sockets - `. -* listeners: added support for :ref:`abstract unix domain sockets ` on + `. +* listeners: added support for :ref:`abstract unix domain sockets ` on Linux. The abstract namespace can be used by prepending '@' to a socket path. * load balancer: added cluster configuration for :ref:`healthy panic threshold - ` percentage. -* load balancer: added :ref:`Maglev ` consistent hash + ` percentage. +* load balancer: added :ref:`Maglev ` consistent hash load balancer. * load balancer: added support for - :ref:`LocalityLbEndpoints ` priorities. -* lua: added headers :ref:`replace() ` API. -* lua: extended to support :ref:`metadata object ` API. -* redis: added local `PING` support to the :ref:`Redis filter `. + :ref:`LocalityLbEndpoints ` priorities. +* lua: added headers :ref:`replace() ` API. +* lua: extended to support :ref:`metadata object ` API. +* redis: added local `PING` support to the :ref:`Redis filter `. 
* redis: added ``GEORADIUS_RO`` and ``GEORADIUSBYMEMBER_RO`` to the :ref:`Redis command splitter - ` allowlist. + ` allowlist. * router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header - formatters `. The CLIENT_IP header formatter + formatters `. The CLIENT_IP header formatter has been deprecated. -* router: added gateway-error :ref:`retry-on ` policy. +* router: added gateway-error :ref:`retry-on ` policy. * router: added support for route matching based on :ref:`URL query string parameters - `. + `. * router: added support for more granular weighted cluster routing by allowing the :ref:`total_weight - ` to be specified in configuration. + ` to be specified in configuration. * router: added support for :ref:`custom request/response headers - ` with mixed static and dynamic values. -* router: added support for :ref:`direct responses `. + ` with mixed static and dynamic values. +* router: added support for :ref:`direct responses `. I.e., sending a preconfigured HTTP response without proxying anywhere. * router: added support for :ref:`HTTPS redirects - ` on specific routes. + ` on specific routes. * router: added support for :ref:`prefix_rewrite - ` for redirects. + ` for redirects. * router: added support for :ref:`stripping the query string - ` for redirects. + ` for redirects. * router: added support for downstream request/upstream response - :ref:`header manipulation ` in :ref:`weighted - cluster `. + :ref:`header manipulation ` in :ref:`weighted + cluster `. * router: added support for :ref:`range based header matching - ` for request routing. -* squash: added support for the :ref:`Squash microservices debugger `. + ` for request routing. +* squash: added support for the :ref:`Squash microservices debugger `. Allows debugging an incoming request to a microservice in the mesh. * stats: added metrics service API implementation. 
-* stats: added native :ref:`DogStatsd ` support. +* stats: added native :ref:`DogStatsd ` support. * stats: added support for :ref:`fixed stats tag values - ` which will be added to all metrics. + ` which will be added to all metrics. * tcp proxy: added support for specifying a :ref:`metadata matcher - ` for upstream + ` for upstream clusters in the tcp filter. * tcp proxy: improved TCP proxy to correctly proxy TCP half-close. * tcp proxy: added :ref:`idle timeout - `. + `. * tcp proxy: access logs now bring an IP address without a port when using DOWNSTREAM_ADDRESS. - Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. + Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. * tracing: added support for dynamically loading an :ref:`OpenTracing tracer - `. + `. * tracing: when using the Zipkin tracer, it is now possible for clients to specify the sampling - decision (using the :ref:`x-b3-sampled ` header) and + decision (using the :ref:`x-b3-sampled ` header) and have the decision propagated through to subsequently invoked services. * tracing: when using the Zipkin tracer, it is no longer necessary to propagate the - :ref:`x-ot-span-context ` header. - See more on trace context propagation :ref:`here `. + :ref:`x-ot-span-context ` header. + See more on trace context propagation :ref:`here `. * transport sockets: added transport socket interface to allow custom implementations of transport sockets. A transport socket provides read and write logic with buffer encryption and decryption (if applicable). The existing TLS implementation has been refactored with the interface. * upstream: added support for specifying an :ref:`alternate stats name - ` while emitting stats for clusters. + ` while emitting stats for clusters. * Many small bug fixes and performance improvements not listed. 
Deprecated diff --git a/docs/root/version_history/v1.7.0.rst b/docs/root/version_history/v1.7.0.rst index 8be132e66643c..3776551a11955 100644 --- a/docs/root/version_history/v1.7.0.rst +++ b/docs/root/version_history/v1.7.0.rst @@ -6,154 +6,154 @@ Changes * access log: added ability to log response trailers. * access log: added ability to format START_TIME. -* access log: added DYNAMIC_METADATA :ref:`access log formatter `. -* access log: added :ref:`HeaderFilter ` +* access log: added DYNAMIC_METADATA :ref:`access log formatter `. +* access log: added :ref:`HeaderFilter ` to filter logs based on request headers. * access log: added ``%([1-9])?f`` as one of START_TIME specifiers to render subseconds. * access log: gRPC Access Log Service (ALS) support added for :ref:`HTTP access logs - `. + `. * access log: improved WebSocket logging. * admin: added :http:get:`/config_dump` for dumping the current configuration and associated xDS version information (if applicable). * admin: added :http:get:`/clusters?format=json` for outputing a JSON-serialized proto detailing the current status of all clusters. * admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format. -* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values. +* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values. * admin: mutations must be sent as POSTs, rather than GETs. Mutations include: :http:post:`/cpuprofiler`, :http:post:`/healthcheck/fail`, :http:post:`/healthcheck/ok`, :http:post:`/logging`, :http:post:`/quitquitquit`, :http:post:`/reset_counters`, :http:post:`/runtime_modify?key1=value1&key2=value2&keyN=valueN`. -* admin: removed ``/routes`` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. +* admin: removed ``/routes`` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. 
* buffer filter: the buffer filter can be optionally - :ref:`disabled ` or - :ref:`overridden ` with + :ref:`disabled ` or + :ref:`overridden ` with route-local configuration. * cli: added --config-yaml flag to the Envoy binary. When set its value is interpreted as a yaml representation of the bootstrap config and overrides --config-path. -* cluster: added :ref:`option ` +* cluster: added :ref:`option ` to close tcp_proxy upstream connections when health checks fail. -* cluster: added :ref:`option ` to drain +* cluster: added :ref:`option ` to drain connections from hosts after they are removed from service discovery, regardless of health status. * cluster: fixed bug preventing the deletion of all endpoints in a priority * debug: added symbolized stack traces (where supported) * ext-authz filter: added support to raw HTTP authorization. * ext-authz filter: added support to gRPC responses to carry HTTP attributes. * grpc: support added for the full set of :ref:`Google gRPC call credentials - `. -* gzip filter: added :ref:`stats ` to the filter. + `. +* gzip filter: added :ref:`stats ` to the filter. * gzip filter: sending *accept-encoding* header as *identity* no longer compresses the payload. * health check: added ability to set :ref:`additional HTTP headers - ` for HTTP health check. + ` for HTTP health check. * health check: added support for EDS delivered :ref:`endpoint health status - `. + `. * health check: added interval overrides for health state transitions from :ref:`healthy to unhealthy - `, :ref:`unhealthy to healthy - ` and for subsequent checks on - :ref:`unhealthy hosts `. -* health check: added support for :ref:`custom health check `. + `, :ref:`unhealthy to healthy + ` and for subsequent checks on + :ref:`unhealthy hosts `. +* health check: added support for :ref:`custom health check `. * health check: health check connections can now be configured to use http/2. 
* health check http filter: added - :ref:`generic header matching ` + :ref:`generic header matching ` to trigger health check response. Deprecated the endpoint option. * http: filters can now optionally support - :ref:`virtual host `, - :ref:`route `, and - :ref:`weighted cluster ` + :ref:`virtual host `, + :ref:`route `, and + :ref:`weighted cluster ` local configuration. * http: added the ability to pass DNS type Subject Alternative Names of the client certificate in the - :ref:`v1.7.0:config_http_conn_man_headers_x-forwarded-client-cert` header. + :ref:`v1.7:config_http_conn_man_headers_x-forwarded-client-cert` header. * http: local responses to gRPC requests are now sent as trailers-only gRPC responses instead of plain HTTP responses. Notably the HTTP response code is always "200" in this case, and the gRPC error code is carried in "grpc-status" header, optionally accompanied with a text message in "grpc-message" header. * http: added support for :ref:`via header - ` + ` append. * http: added a :ref:`configuration option - ` + ` to elide *x-forwarded-for* header modifications. * http: fixed a bug in inline headers where addCopy and addViaMove didn't add header values when encountering inline headers with multiple instances. -* listeners: added :ref:`tcp_fast_open_queue_length ` option. -* listeners: added the ability to match :ref:`FilterChain ` using - :ref:`application_protocols ` +* listeners: added :ref:`tcp_fast_open_queue_length ` option. +* listeners: added the ability to match :ref:`FilterChain ` using + :ref:`application_protocols ` (e.g. ALPN for TLS protocol). -* listeners: ``sni_domains`` has been deprecated/renamed to :ref:`server_names `. +* listeners: ``sni_domains`` has been deprecated/renamed to :ref:`server_names `. * listeners: removed restriction on all filter chains having identical filters. * load balancer: added :ref:`weighted round robin - ` support. The round robin + ` support. 
The round robin scheduler now respects endpoint weights and also has improved fidelity across picks. * load balancer: :ref:`locality weighted load balancing - ` is now supported. + ` is now supported. * load balancer: ability to configure zone aware load balancer settings :ref:`through the API - `. + `. * load balancer: the :ref:`weighted least request - ` load balancing algorithm has been improved + ` load balancing algorithm has been improved to have better balance when operating in weighted mode. * logger: added the ability to optionally set the log format via the :option:`--log-format` option. -* logger: all :ref:`logging levels ` can be configured +* logger: all :ref:`logging levels ` can be configured at run-time: trace debug info warning error critical. -* rbac http filter: a :ref:`role-based access control http filter ` has been added. +* rbac http filter: a :ref:`role-based access control http filter ` has been added. * router: the behavior of per-try timeouts have changed in the case where a portion of the response has already been proxied downstream when the timeout occurs. Previously, the response would be reset leading to either an HTTP/2 reset or an HTTP/1 closed connection and a partial response. Now, the timeout will be ignored and the response will continue to proxy up to the global request timeout. -* router: changed the behavior of :ref:`source IP routing ` +* router: changed the behavior of :ref:`source IP routing ` to ignore the source port. -* router: added an :ref:`prefix_match ` match type +* router: added an :ref:`prefix_match ` match type to explicitly match based on the prefix of a header value. -* router: added an :ref:`suffix_match ` match type +* router: added an :ref:`suffix_match ` match type to explicitly match based on the suffix of a header value. -* router: added an :ref:`present_match ` match type +* router: added an :ref:`present_match ` match type to explicitly match based on a header's presence. 
-* router: added an :ref:`invert_match ` config option +* router: added an :ref:`invert_match ` config option which supports inverting all other match types to match based on headers which are not a desired value. -* router: allow :ref:`cookie routing ` to +* router: allow :ref:`cookie routing ` to generate session cookies. * router: added START_TIME as one of supported variables in :ref:`header - formatters `. -* router: added a :ref:`max_grpc_timeout ` + formatters `. +* router: added a :ref:`max_grpc_timeout ` config option to specify the maximum allowable value for timeouts decoded from gRPC header field ``grpc-timeout``. * router: added a :ref:`configuration option - ` to disable *x-envoy-* + ` to disable *x-envoy-* header generation. * router: added 'unavailable' to the retriable gRPC status codes that can be specified - through :ref:`x-envoy-retry-grpc-on `. -* sockets: added :ref:`tap transport socket extension ` to support + through :ref:`x-envoy-retry-grpc-on `. +* sockets: added :ref:`tap transport socket extension ` to support recording plain text traffic and PCAP generation. * sockets: added ``IP_FREEBIND`` socket option support for :ref:`listeners - ` and upstream connections via + ` and upstream connections via :ref:`cluster manager wide - ` and - :ref:`cluster specific ` options. + ` and + :ref:`cluster specific ` options. * sockets: added ``IP_TRANSPARENT`` socket option support for :ref:`listeners - `. + `. * sockets: added ``SO_KEEPALIVE`` socket option for upstream connections - :ref:`per cluster `. + :ref:`per cluster `. * stats: added support for histograms. -* stats: added :ref:`option to configure the statsd prefix `. +* stats: added :ref:`option to configure the statsd prefix `. * stats: updated stats sink interface to flush through a single call. * tls: added support for - :ref:`verify_certificate_spki `. + :ref:`verify_certificate_spki `. 
* tls: added support for multiple - :ref:`verify_certificate_hash ` + :ref:`verify_certificate_hash ` values. * tls: added support for using - :ref:`verify_certificate_spki ` - and :ref:`verify_certificate_hash ` - without :ref:`trusted_ca `. + :ref:`verify_certificate_spki ` + and :ref:`verify_certificate_hash ` + without :ref:`trusted_ca `. * tls: added support for allowing expired certificates with - :ref:`allow_expired_certificate `. -* tls: added support for :ref:`renegotiation ` + :ref:`allow_expired_certificate `. +* tls: added support for :ref:`renegotiation ` when acting as a client. * tls: removed support for legacy SHA-2 CBC cipher suites. * tracing: the sampling decision is now delegated to the tracers, allowing the tracer to decide when and if - to use it. For example, if the :ref:`x-b3-sampled ` header + to use it. For example, if the :ref:`x-b3-sampled ` header is supplied with the client request, its value will override any sampling decision made by the Envoy proxy. * websocket: support configuring idle_timeout and max_connect_attempts. -* upstream: added support for host override for a request in :ref:`Original destination host request header `. -* header to metadata: added :ref:`HTTP Header to Metadata filter `. +* upstream: added support for host override for a request in :ref:`Original destination host request header `. +* header to metadata: added :ref:`HTTP Header to Metadata filter `. Deprecated ---------- diff --git a/docs/root/version_history/v1.8.0.rst b/docs/root/version_history/v1.8.0.rst index 4f1d07b22a87f..d6cf45d5703d3 100644 --- a/docs/root/version_history/v1.8.0.rst +++ b/docs/root/version_history/v1.8.0.rst @@ -4,72 +4,72 @@ Changes ------- -* access log: added :ref:`response flag filter ` +* access log: added :ref:`response flag filter ` to filter based on the presence of Envoy response flags. * access log: added RESPONSE_DURATION and RESPONSE_TX_DURATION. 
* access log: added REQUESTED_SERVER_NAME for SNI to tcp_proxy and http * admin: added :http:get:`/hystrix_event_stream` as an endpoint for monitoring envoy's statistics through `Hystrix dashboard `_. -* cli: added support for :ref:`component log level ` command line option for configuring log levels of individual components. -* cluster: added :ref:`option ` to merge +* cli: added support for :ref:`component log level ` command line option for configuring log levels of individual components. +* cluster: added :ref:`option ` to merge health check/weight/metadata updates within the given duration. * config: regex validation added to limit to a maximum of 1024 characters. * config: v1 disabled by default. v1 support remains available until October via flipping --v2-config-only=false. * config: v1 disabled by default. v1 support remains available until October via deprecated flag --allow-deprecated-v1-api. -* config: fixed stat inconsistency between xDS and ADS implementation. :ref:`update_failure ` - stat is incremented in case of network failure and :ref:`update_rejected ` stat is incremented +* config: fixed stat inconsistency between xDS and ADS implementation. :ref:`update_failure ` + stat is incremented in case of network failure and :ref:`update_rejected ` stat is incremented in case of schema/validation error. -* config: added a stat :ref:`connected_state ` that indicates current connected state of Envoy with +* config: added a stat :ref:`connected_state ` that indicates current connected state of Envoy with management server. -* ext_authz: added support for configuring additional :ref:`authorization headers ` +* ext_authz: added support for configuring additional :ref:`authorization headers ` to be sent from Envoy to the authorization service. -* fault: added support for fractional percentages in :ref:`FaultDelay ` - and in :ref:`FaultAbort `. +* fault: added support for fractional percentages in :ref:`FaultDelay ` + and in :ref:`FaultAbort `. 
* grpc-json: added support for building HTTP response from `google.api.HttpBody `_. -* health check: added support for :ref:`custom health check `. -* health check: added support for :ref:`specifying jitter as a percentage `. -* health_check: added support for :ref:`health check event logging `. -* health_check: added :ref:`timestamp ` - to the :ref:`health check event ` definition. -* health_check: added support for specifying :ref:`custom request headers ` +* health check: added support for :ref:`custom health check `. +* health check: added support for :ref:`specifying jitter as a percentage `. +* health_check: added support for :ref:`health check event logging `. +* health_check: added :ref:`timestamp ` + to the :ref:`health check event ` definition. +* health_check: added support for specifying :ref:`custom request headers ` to HTTP health checker requests. * http: added support for a :ref:`per-stream idle timeout - `. This applies at both :ref:`connection manager - ` - and :ref:`per-route granularity `. The timeout + `. This applies at both :ref:`connection manager + ` + and :ref:`per-route granularity `. The timeout defaults to 5 minutes; if you have other timeouts (e.g. connection idle timeout, upstream response per-retry) that are longer than this in duration, you may want to consider setting a non-default per-stream idle timeout. -* http: added upstream_rq_completed counter for :ref:`total requests completed ` to dynamic HTTP counters. -* http: added downstream_rq_completed counter for :ref:`total requests completed `, including on a :ref:`per-listener basis `. +* http: added upstream_rq_completed counter for :ref:`total requests completed ` to dynamic HTTP counters. +* http: added downstream_rq_completed counter for :ref:`total requests completed `, including on a :ref:`per-listener basis `. * http: added generic :ref:`Upgrade support - `. + `. * http: better handling of HEAD requests. Now sending transfer-encoding: chunked rather than content-length: 0. 
* http: fixed missing support for appending to predefined inline headers, e.g. *authorization*, in features that interact with request and response headers, e.g. :ref:`request_headers_to_add - `. For example, a + `. For example, a request header *authorization: token1* will appear as *authorization: token1,token2*, after having :ref:`request_headers_to_add - ` with *authorization: + ` with *authorization: token2* applied. * http: response filters not applied to early error paths such as http_parser generated 400s. * http: restrictions added to reject *:*-prefixed pseudo-headers in :ref:`custom - request headers `. -* http: :ref:`hpack_table_size ` now controls + request headers `. +* http: :ref:`hpack_table_size ` now controls dynamic table size of both: encoder and decoder. * http: added support for removing request headers using :ref:`request_headers_to_remove - `. -* http: added support for a :ref:`delayed close timeout ` to mitigate race conditions when closing connections to downstream HTTP clients. The timeout defaults to 1 second. + `. +* http: added support for a :ref:`delayed close timeout ` to mitigate race conditions when closing connections to downstream HTTP clients. The timeout defaults to 1 second. * jwt-authn filter: add support for per route JWT requirements. -* listeners: added the ability to match :ref:`FilterChain ` using - :ref:`destination_port ` and - :ref:`prefix_ranges `. -* lua: added :ref:`connection() ` wrapper and *ssl()* API. -* lua: added :ref:`streamInfo() ` wrapper and *protocol()* API. -* lua: added :ref:`streamInfo():dynamicMetadata() ` API. -* network: introduced :ref:`sni_cluster ` network filter that forwards connections to the +* listeners: added the ability to match :ref:`FilterChain ` using + :ref:`destination_port ` and + :ref:`prefix_ranges `. +* lua: added :ref:`connection() ` wrapper and *ssl()* API. +* lua: added :ref:`streamInfo() ` wrapper and *protocol()* API. +* lua: added :ref:`streamInfo():dynamicMetadata() ` API. 
+* network: introduced :ref:`sni_cluster ` network filter that forwards connections to the upstream cluster specified by the SNI value presented by the client during a TLS handshake. * proxy_protocol: added support for HAProxy Proxy Protocol v2 (AF_INET/AF_INET6 only). * ratelimit: added support for :repo:`api/envoy/service/ratelimit/v2/rls.proto`. @@ -77,25 +77,25 @@ Changes Envoy can use either proto to send client requests to a ratelimit server with the use of the ``use_data_plane_proto`` boolean flag in the ratelimit configuration. Support for the legacy proto ``source/common/ratelimit/ratelimit.proto`` is deprecated and will be removed at the start of the 1.9.0 release cycle. -* ratelimit: added :ref:`failure_mode_deny ` option to control traffic flow in +* ratelimit: added :ref:`failure_mode_deny ` option to control traffic flow in case of rate limit service error. -* rbac config: added a :ref:`principal_name ` field and +* rbac config: added a :ref:`principal_name ` field and removed the old ``name`` field to give more flexibility for matching certificate identity. -* rbac network filter: a :ref:`role-based access control network filter ` has been added. -* rest-api: added ability to set the :ref:`request timeout ` for REST API requests. +* rbac network filter: a :ref:`role-based access control network filter ` has been added. +* rest-api: added ability to set the :ref:`request timeout ` for REST API requests. * route checker: added v2 config support and removed support for v1 configs. -* router: added ability to set request/response headers at the :ref:`v1.8.0:envoy_api_msg_route.Route` level. -* stats: added :ref:`option to configure the DogStatsD metric name prefix ` to DogStatsdSink. -* tcp_proxy: added support for :ref:`weighted clusters `. +* router: added ability to set request/response headers at the :ref:`v1.8:envoy_api_msg_route.Route` level. +* stats: added :ref:`option to configure the DogStatsD metric name prefix ` to DogStatsdSink. 
+* tcp_proxy: added support for :ref:`weighted clusters `. * thrift_proxy: introduced thrift routing, moved configuration to correct location * thrift_proxy: introduced thrift configurable decoder filters -* tls: implemented :ref:`Secret Discovery Service `. +* tls: implemented :ref:`Secret Discovery Service `. * tracing: added support for configuration of :ref:`tracing sampling - `. + `. * upstream: added configuration option to the subset load balancer to take locality weights into account when selecting a host from a subset. -* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host ` header - for overriding destination address when using the :ref:`Original Destination ` +* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host ` header + for overriding destination address when using the :ref:`Original Destination ` load balancing policy. Deprecated diff --git a/docs/root/version_history/v1.9.0.rst b/docs/root/version_history/v1.9.0.rst index d9056fb3aeb41..5ef6016945429 100644 --- a/docs/root/version_history/v1.9.0.rst +++ b/docs/root/version_history/v1.9.0.rst @@ -4,31 +4,31 @@ Changes ------- -* access log: added a :ref:`JSON logging mode ` to output access logs in JSON format. +* access log: added a :ref:`JSON logging mode ` to output access logs in JSON format. * access log: added dynamic metadata to access log messages streamed over gRPC. * access log: added DOWNSTREAM_CONNECTION_TERMINATION. * admin: :http:post:`/logging` now responds with 200 while there are no params. -* admin: added support for displaying subject alternate names in :ref:`certs ` end point. +* admin: added support for displaying subject alternate names in :ref:`certs ` end point. * admin: added host weight to the :http:get:`/clusters?format=json` end point response. * admin: :http:get:`/server_info` now responds with a JSON object instead of a single string. * admin: :http:get:`/server_info` now exposes what stage of initialization the server is currently in. 
* admin: added support for displaying command line options in :http:get:`/server_info` end point. * circuit-breaker: added cx_open, rq_pending_open, rq_open and rq_retry_open gauges to expose live - state via :ref:`circuit breakers statistics `. -* cluster: set a default of 1s for :ref:`option `. + state via :ref:`circuit breakers statistics `. +* cluster: set a default of 1s for :ref:`option `. * config: removed support for the v1 API. -* config: added support for :ref:`rate limiting ` discovery request calls. -* cors: added :ref:`invalid/valid stats ` to filter. +* config: added support for :ref:`rate limiting ` discovery request calls. +* cors: added :ref:`invalid/valid stats ` to filter. * ext-authz: added support for providing per route config - optionally disable the filter and provide context extensions. * fault: removed integer percentage support. * grpc-json: added support for :ref:`ignoring query parameters - `. -* health check: added :ref:`logging health check failure events `. + `. +* health check: added :ref:`logging health check failure events `. * health check: added ability to set :ref:`authority header value - ` for gRPC health check. -* http: added HTTP/2 WebSocket proxying via :ref:`extended CONNECT `. + ` for gRPC health check. +* http: added HTTP/2 WebSocket proxying via :ref:`extended CONNECT `. * http: added limits to the number and length of header modifications in all fields request_headers_to_add and response_headers_to_add. These limits are very high and should only be used as a last-resort safeguard. -* http: added support for a :ref:`request timeout `. The timeout is disabled by default. +* http: added support for a :ref:`request timeout `. The timeout is disabled by default. * http: no longer adding whitespace when appending X-Forwarded-For headers. **Warning**: this is not compatible with 1.7.0 builds prior to `9d3a4eb4ac44be9f0651fcc7f87ad98c538b01ee `_. See `#3611 `_ for details. 
@@ -36,66 +36,66 @@ Changes value to override the default HTTP to gRPC status mapping. * http: no longer close the TCP connection when a HTTP/1 request is retried due to a response with empty body. -* http: added support for more gRPC content-type headers in :ref:`gRPC bridge filter `, like application/grpc+proto. +* http: added support for more gRPC content-type headers in :ref:`gRPC bridge filter `, like application/grpc+proto. * listeners: all listener filters are now governed by the :ref:`listener_filters_timeout - ` setting. The hard coded 15s timeout in - the :ref:`TLS inspector listener filter ` is superseded by + ` setting. The hard coded 15s timeout in + the :ref:`TLS inspector listener filter ` is superseded by this setting. -* listeners: added the ability to match :ref:`FilterChain ` using :ref:`source_type `. -* load balancer: added a `configuration ` option to specify the number of choices made in P2C. +* listeners: added the ability to match :ref:`FilterChain ` using :ref:`source_type `. +* load balancer: added a `configuration ` option to specify the number of choices made in P2C. * logging: added missing [ in log prefix. -* mongo_proxy: added :ref:`dynamic metadata `. +* mongo_proxy: added :ref:`dynamic metadata `. * network: removed the reference to ``FilterState`` in ``Connection`` in favor of ``StreamInfo``. -* rate-limit: added :ref:`configuration ` +* rate-limit: added :ref:`configuration ` to specify whether the ``GrpcStatus`` status returned should be ``RESOURCE_EXHAUSTED`` or ``UNAVAILABLE`` when a gRPC call is rate limited. * rate-limit: removed support for the legacy ratelimit service and made the data-plane-api - :ref:`rls.proto ` based implementation default. -* rate-limit: removed the deprecated cluster_name attribute in :ref:`rate limit service configuration `. -* rate-limit: added :ref:`rate_limit_service ` configuration to filters. + :ref:`rls.proto ` based implementation default. 
+* rate-limit: removed the deprecated cluster_name attribute in :ref:`rate limit service configuration `. +* rate-limit: added :ref:`rate_limit_service ` configuration to filters. * rbac: added dynamic metadata to the network level filter. -* rbac: added support for permission matching by :ref:`requested server name `. +* rbac: added support for permission matching by :ref:`requested server name `. * redis: static cluster configuration is no longer required. Redis proxy will work with clusters delivered via CDS. -* router: added ability to configure arbitrary :ref:`retriable status codes. ` +* router: added ability to configure arbitrary :ref:`retriable status codes. ` * router: added ability to set attempt count in upstream requests, see :ref:`virtual host's include request - attempt count flag `. -* router: added internal :ref:`grpc-retry-on ` policy. -* router: added :ref:`scheme_redirect ` and - :ref:`port_redirect ` to define the respective + attempt count flag `. +* router: added internal :ref:`grpc-retry-on ` policy. +* router: added :ref:`scheme_redirect ` and + :ref:`port_redirect ` to define the respective scheme and port rewriting RedirectAction. -* router: when :ref:`max_grpc_timeout ` +* router: when :ref:`max_grpc_timeout ` is set, Envoy will now add or update the grpc-timeout header to reflect Envoy's expected timeout. * router: per try timeouts now starts when an upstream stream is ready instead of when the request has been fully decoded by Envoy. -* router: added support for not retrying :ref:`rate limited requests `. Rate limit filter now sets the :ref:`x-envoy-ratelimited ` +* router: added support for not retrying :ref:`rate limited requests `. Rate limit filter now sets the :ref:`x-envoy-ratelimited ` header so the rate limited requests that may have been retried earlier will not be retried with this change. -* router: added support for enabling upgrades on a :ref:`per-route ` basis. 
+* router: added support for enabling upgrades on a :ref:`per-route ` basis. * router: support configuring a default fraction of mirror traffic via - :ref:`runtime_fraction `. -* sandbox: added :ref:`cors sandbox `. + :ref:`runtime_fraction `. +* sandbox: added :ref:`cors sandbox `. * server: added ``SIGINT`` (Ctrl-C) handler to gracefully shutdown Envoy like ``SIGTERM``. -* stats: added :ref:`stats_matcher ` to the bootstrap config for granular control of stat instantiation. +* stats: added :ref:`stats_matcher ` to the bootstrap config for granular control of stat instantiation. * stream: renamed the ``RequestInfo`` namespace to ``StreamInfo`` to better match its behaviour within TCP and HTTP implementations. * stream: renamed ``perRequestState`` to ``filterState`` in ``StreamInfo``. * stream: added ``downstreamDirectRemoteAddress`` to ``StreamInfo``. * thrift_proxy: introduced thrift rate limiter filter. * tls: added ssl.curves., ssl.sigalgs. and ssl.versions. to - :ref:`listener metrics ` to track TLS algorithms and versions in use. -* tls: added support for :ref:`client-side session resumption `. -* tls: added support for CRLs in :ref:`trusted_ca `. -* tls: added support for :ref:`multiple server TLS certificates `. -* tls: added support for :ref:`password encrypted private keys `. -* tls: added the ability to build :ref:`BoringSSL FIPS ` using ``--define boringssl=fips`` Bazel option. + :ref:`listener metrics ` to track TLS algorithms and versions in use. +* tls: added support for :ref:`client-side session resumption `. +* tls: added support for CRLs in :ref:`trusted_ca `. +* tls: added support for :ref:`multiple server TLS certificates `. +* tls: added support for :ref:`password encrypted private keys `. +* tls: added the ability to build :ref:`BoringSSL FIPS ` using ``--define boringssl=fips`` Bazel option. * tls: removed support for ECDSA certificates with curves other than P-256. * tls: removed support for RSA certificates with keys smaller than 2048-bits. 
-* tracing: added support to the Zipkin tracer for the :ref:`b3 ` single header format. -* tracing: added support for :ref:`Datadog ` tracer. -* upstream: added :ref:`scale_locality_weight ` to enable +* tracing: added support to the Zipkin tracer for the :ref:`b3 ` single header format. +* tracing: added support for :ref:`Datadog ` tracer. +* upstream: added :ref:`scale_locality_weight ` to enable scaling locality weights by number of hosts removed by subset lb predicates. -* upstream: changed how load calculation for :ref:`priority levels ` and :ref:`panic thresholds ` interact. As long as normalized total health is 100% panic thresholds are disregarded. -* upstream: changed the default hash for :ref:`ring hash ` from std::hash to `xxHash `_. +* upstream: changed how load calculation for :ref:`priority levels ` and :ref:`panic thresholds ` interact. As long as normalized total health is 100% panic thresholds are disregarded. +* upstream: changed the default hash for :ref:`ring hash ` from std::hash to `xxHash `_. * upstream: when using active health checking and STRICT_DNS with several addresses that resolve to the same hosts, Envoy will now health check each host independently. diff --git a/docs/root/version_history/v1.9.1.rst b/docs/root/version_history/v1.9.1.rst index 7027026e5094f..e7121cdc51fa4 100644 --- a/docs/root/version_history/v1.9.1.rst +++ b/docs/root/version_history/v1.9.1.rst @@ -7,5 +7,5 @@ Changes * http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters. * http: fixed CVE-2019-9901 by normalizing HTTP paths prior to routing or L7 data plane processing. This defaults off and is configurable via either HTTP connection manager :ref:`normalize_path - ` - or the :ref:`runtime `. + ` + or the :ref:`runtime `. 
diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index 371bc2c398781..7cca61970b5bd 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -7,6 +7,7 @@ Version history :titlesonly: current + v1.20.0 v1.19.1 v1.19.0 v1.18.4 diff --git a/docs/v2_mapping.json b/docs/v2_mapping.json index 8d437476dff6e..4e9b014276d03 100644 --- a/docs/v2_mapping.json +++ b/docs/v2_mapping.json @@ -56,7 +56,7 @@ "envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto": "envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto", "envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto": "envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto", "envoy/extensions/filters/http/buffer/v3/buffer.proto": "envoy/config/filter/http/buffer/v2/buffer.proto", - "envoy/extensions/filters/http/cache/v3alpha/cache.proto": "envoy/config/filter/http/cache/v2alpha/cache.proto", + "envoy/extensions/filters/http/cache/v3/cache.proto": "envoy/config/filter/http/cache/v2alpha/cache.proto", "envoy/extensions/filters/http/compressor/v3/compressor.proto": "envoy/config/filter/http/compressor/v2/compressor.proto", "envoy/extensions/filters/http/cors/v3/cors.proto": "envoy/config/filter/http/cors/v2/cors.proto", "envoy/extensions/filters/http/csrf/v3/csrf.proto": "envoy/config/filter/http/csrf/v2/csrf.proto", diff --git a/envoy/api/BUILD b/envoy/api/BUILD index 904e5fff75f8a..cbdc13440690c 100644 --- a/envoy/api/BUILD +++ b/envoy/api/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( "//envoy/event:scaled_range_timer_manager_interface", "//envoy/filesystem:filesystem_interface", "//envoy/server:process_context_interface", + "//envoy/stats:custom_stat_namespaces_interface", "//envoy/thread:thread_interface", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], diff --git a/envoy/api/api.h b/envoy/api/api.h index c83198beed8be..e567ac51c8e6d 100644 
--- a/envoy/api/api.h +++ b/envoy/api/api.h @@ -10,6 +10,7 @@ #include "envoy/event/scaled_range_timer_manager.h" #include "envoy/filesystem/filesystem.h" #include "envoy/server/process_context.h" +#include "envoy/stats/custom_stat_namespaces.h" #include "envoy/stats/store.h" #include "envoy/thread/thread.h" @@ -89,6 +90,11 @@ class Api { * @return the bootstrap Envoy started with. */ virtual const envoy::config::bootstrap::v3::Bootstrap& bootstrap() const PURE; + + /** + * @return a reference to the Stats::CustomStatNamespaces. + */ + virtual Stats::CustomStatNamespaces& customStatNamespaces() PURE; }; using ApiPtr = std::unique_ptr; diff --git a/envoy/api/io_error.h b/envoy/api/io_error.h index f5de759194d18..049d14053b24f 100644 --- a/envoy/api/io_error.h +++ b/envoy/api/io_error.h @@ -35,6 +35,8 @@ class IoError { BadFd, // An existing connection was forcibly closed by the remote host. ConnectionReset, + // Network is unreachable due to network settings. + NetworkUnreachable, // Other error codes cannot be mapped to any one above in getErrorCode(). UnknownError }; diff --git a/envoy/api/os_sys_calls.h b/envoy/api/os_sys_calls.h index 0c0071742c6be..094959d239b6f 100644 --- a/envoy/api/os_sys_calls.h +++ b/envoy/api/os_sys_calls.h @@ -190,6 +190,21 @@ class OsSysCalls { * @see man TCP_INFO. Get the tcp info for the socket. */ virtual SysCallBoolResult socketTcpInfo(os_fd_t sockfd, EnvoyTcpInfo* tcp_info) PURE; + + /** + * return true if the OS supports getifaddrs. 
+ */ + virtual bool supportsGetifaddrs() const PURE; + + /** + * @see man getifaddrs + */ + virtual SysCallIntResult getifaddrs(ifaddrs** ifap) PURE; + + /** + * @see man getifaddrs + */ + virtual void freeifaddrs(ifaddrs* ifp) PURE; }; using OsSysCallsPtr = std::unique_ptr; diff --git a/envoy/common/platform.h b/envoy/common/platform.h index e610caccb7ee1..30fc88a3afbb0 100644 --- a/envoy/common/platform.h +++ b/envoy/common/platform.h @@ -150,6 +150,7 @@ struct msghdr { #define SOCKET_ERROR_ADDR_IN_USE WSAEADDRINUSE #define SOCKET_ERROR_BADF WSAEBADF #define SOCKET_ERROR_CONNRESET WSAECONNRESET +#define SOCKET_ERROR_NETUNREACH WSAENETUNREACH #define HANDLE_ERROR_PERM ERROR_ACCESS_DENIED #define HANDLE_ERROR_INVALID ERROR_INVALID_HANDLE @@ -259,6 +260,7 @@ typedef int signal_t; // NOLINT(modernize-use-using) #define SOCKET_ERROR_ADDR_IN_USE EADDRINUSE #define SOCKET_ERROR_BADF EBADF #define SOCKET_ERROR_CONNRESET ECONNRESET +#define SOCKET_ERROR_NETUNREACH ENETUNREACH // Mapping POSIX file errors to common error names #define HANDLE_ERROR_PERM EACCES @@ -291,15 +293,25 @@ struct mmsghdr { }; #endif -#define SUPPORTS_GETIFADDRS -#ifdef WIN32 -#undef SUPPORTS_GETIFADDRS +// https://android.googlesource.com/platform/prebuilts/ndk/+/dev/platform/sysroot/usr/include/ifaddrs.h +#if defined(WIN32) || (defined(__ANDROID_API__) && __ANDROID_API__ < 24) +// Posix structure necessary for getifaddrs definition. 
+struct ifaddrs { + struct ifaddrs* ifa_next; + char* ifa_name; + unsigned int ifa_flags; + struct sockaddr* ifa_addr; + struct sockaddr* ifa_netmask; + struct sockaddr* ifa_dstaddr; + void* ifa_data; +}; #endif -// https://android.googlesource.com/platform/prebuilts/ndk/+/dev/platform/sysroot/usr/include/ifaddrs.h +// TODO: Remove once bazel supports NDKs > 21 +#define SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR #ifdef __ANDROID_API__ #if __ANDROID_API__ < 24 -#undef SUPPORTS_GETIFADDRS +#undef SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR #endif // __ANDROID_API__ < 24 #endif // ifdef __ANDROID_API__ diff --git a/envoy/http/BUILD b/envoy/http/BUILD index ee9e63a01d6c2..b9f7bfe3089b5 100644 --- a/envoy/http/BUILD +++ b/envoy/http/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["alternate_protocols_cache.h"], deps = [ "//envoy/common:time_interface", + "//envoy/event:dispatcher_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -32,6 +33,7 @@ envoy_cc_library( "//envoy/event:dispatcher_interface", "//envoy/tracing:http_tracer_interface", "//source/common/protobuf", + "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) diff --git a/envoy/http/alternate_protocols_cache.h b/envoy/http/alternate_protocols_cache.h index 5dbbff8c29096..535f40bd982db 100644 --- a/envoy/http/alternate_protocols_cache.h +++ b/envoy/http/alternate_protocols_cache.h @@ -9,6 +9,7 @@ #include "envoy/common/optref.h" #include "envoy/common/time.h" #include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/event/dispatcher.h" #include "absl/strings/string_view.h" @@ -92,10 +93,11 @@ class AlternateProtocolsCache { * Sets the possible alternative protocols which can be used to connect to the * specified origin. Expires after the specified expiration time. * @param origin The origin to set alternate protocols for. - * @param protocols A list of alternate protocols. + * @param protocols A list of alternate protocols. 
This list may be truncated + * by the cache. */ virtual void setAlternatives(const Origin& origin, - const std::vector& protocols) PURE; + std::vector& protocols) PURE; /** * Returns the possible alternative protocols which can be used to connect to the @@ -127,9 +129,11 @@ class AlternateProtocolsCacheManager { * Get an alternate protocols cache. * @param config supplies the cache parameters. If a cache exists with the same parameters it * will be returned, otherwise a new one will be created. + * @param dispatcher supplies the current thread's dispatcher, for cache creation. */ virtual AlternateProtocolsCacheSharedPtr - getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& config) PURE; + getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& config, + Event::Dispatcher& dispatcher) PURE; }; using AlternateProtocolsCacheManagerSharedPtr = std::shared_ptr; diff --git a/envoy/http/filter.h b/envoy/http/filter.h index 6dcf6f2f42f33..9a6527ca6519a 100644 --- a/envoy/http/filter.h +++ b/envoy/http/filter.h @@ -1043,6 +1043,12 @@ class FilterChainFactoryCallbacks { * @param handler supplies the handler to add. */ virtual void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) PURE; + + /** + * Allows filters to access the thread local dispatcher. + * @param return the worker thread's dispatcher. + */ + virtual Event::Dispatcher& dispatcher() PURE; }; /** diff --git a/envoy/http/header_map.h b/envoy/http/header_map.h index d50e173fecbb1..3fb5506c84f02 100644 --- a/envoy/http/header_map.h +++ b/envoy/http/header_map.h @@ -542,15 +542,14 @@ class HeaderMap { /** * Replaces a header value by copying the value. Copies the key if the key does not exist. + * If there are multiple values for one header, this removes all existing values and add + * the new one. * * Calling setCopy multiple times for the same header will result in only the last header * being present in the HeaderMap. 
* * @param key specifies the name of the header to set; it WILL be copied. * @param value specifies the value of the header to set; it WILL be copied. - * - * Caution: This iterates over the HeaderMap to find the header to set. This will modify only the - * first occurrence of the header. * TODO(asraa): Investigate whether necessary to set all headers with the key. */ virtual void setCopy(const LowerCaseString& key, absl::string_view value) PURE; diff --git a/envoy/http/query_params.h b/envoy/http/query_params.h index d30ae58b1ab36..e500f93ca3c86 100644 --- a/envoy/http/query_params.h +++ b/envoy/http/query_params.h @@ -2,6 +2,7 @@ #include #include +#include namespace Envoy { namespace Http { @@ -12,6 +13,7 @@ namespace Utility { // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/query_params.h using QueryParams = std::map; +using QueryParamsVector = std::vector>; } // namespace Utility } // namespace Http diff --git a/envoy/network/connection_handler.h b/envoy/network/connection_handler.h index beabccddf8cf2..5cab5acdc26bc 100644 --- a/envoy/network/connection_handler.h +++ b/envoy/network/connection_handler.h @@ -132,6 +132,17 @@ class ConnectionHandler { * Stop listening according to implementation's own definition. */ virtual void shutdownListener() PURE; + + /** + * Update the listener config. + */ + virtual void updateListenerConfig(Network::ListenerConfig& config) PURE; + + /** + * Called when the given filter chains are about to be removed. 
+ */ + virtual void onFilterChainDraining( + const std::list& draining_filter_chains) PURE; }; using ActiveListenerPtr = std::unique_ptr; diff --git a/envoy/network/dns.h b/envoy/network/dns.h index d2f7e23f5bae9..38dd39bf45760 100644 --- a/envoy/network/dns.h +++ b/envoy/network/dns.h @@ -46,7 +46,7 @@ struct DnsResponse { const std::chrono::seconds ttl_; }; -enum class DnsLookupFamily { V4Only, V6Only, Auto }; +enum class DnsLookupFamily { V4Only, V6Only, Auto, V4Preferred }; /** * An asynchronous DNS resolver. diff --git a/envoy/network/socket.h b/envoy/network/socket.h index e7200f8ace026..225b76297a06c 100644 --- a/envoy/network/socket.h +++ b/envoy/network/socket.h @@ -47,8 +47,6 @@ struct SocketOptionName { * Interfaces for providing a socket's various addresses. This is split into a getters interface * and a getters + setters interface. This is so that only the getters portion can be overridden * in certain cases. - * TODO(soulxu): Since there are more than address information inside the provider, this will be - * renamed as ConnectionInfoProvider. Ref https://github.com/envoyproxy/envoy/issues/17168 */ class ConnectionInfoProvider { public: diff --git a/envoy/protobuf/message_validator.h b/envoy/protobuf/message_validator.h index 6a2f02c274c7d..c31921832796b 100644 --- a/envoy/protobuf/message_validator.h +++ b/envoy/protobuf/message_validator.h @@ -56,6 +56,12 @@ class ValidationVisitor { * throw an exception. */ virtual void onDeprecatedField(absl::string_view description, bool soft_deprecation) PURE; + + /** + * Called when a message or field is marked as work in progress or a message is contained in a + * proto file marked as work in progress. 
+ */ + virtual void onWorkInProgress(absl::string_view description) PURE; }; class ValidationContext { diff --git a/envoy/router/router.h b/envoy/router/router.h index 57591b2501a8c..b7d8f8db2f3a9 100644 --- a/envoy/router/router.h +++ b/envoy/router/router.h @@ -204,10 +204,15 @@ class RetryPolicy { virtual ~RetryPolicy() = default; /** - * @return std::chrono::milliseconds timeout per retry attempt. + * @return std::chrono::milliseconds timeout per retry attempt. 0 is disabled. */ virtual std::chrono::milliseconds perTryTimeout() const PURE; + /** + * @return std::chrono::milliseconds the optional per try idle timeout. 0 is disabled. + */ + virtual std::chrono::milliseconds perTryIdleTimeout() const PURE; + /** * @return uint32_t the number of retries to allow against the route. */ @@ -231,6 +236,13 @@ class RetryPolicy { */ virtual Upstream::RetryPrioritySharedPtr retryPriority() const PURE; + /** + * @return the retry options predicates for this policy. Each policy will be applied prior + * to retrying a request, allowing for request behavior to be customized. + */ + virtual absl::Span + retryOptionsPredicates() const PURE; + /** * Number of times host selection should be reattempted when selecting a host * for a retry attempt. diff --git a/envoy/server/factory_context.h b/envoy/server/factory_context.h index 03cbfe9c23ff4..349f07ea8c385 100644 --- a/envoy/server/factory_context.h +++ b/envoy/server/factory_context.h @@ -38,28 +38,26 @@ namespace Envoy { namespace Server { namespace Configuration { -/** - * Common interface for downstream and upstream network filters. - */ -class CommonFactoryContext { +// Shared factory context between server factories and cluster factories +class FactoryContextBase { public: - virtual ~CommonFactoryContext() = default; + virtual ~FactoryContextBase() = default; /** - * @return Upstream::ClusterManager& singleton for use by the entire server. + * @return Server::Options& the command-line options that Envoy was started with. 
*/ - virtual Upstream::ClusterManager& clusterManager() PURE; + virtual const Options& options() PURE; /** * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used * for all singleton processing. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /** - * @return Server::Options& the command-line options that Envoy was started with. + * @return Api::Api& a reference to the api object. */ - virtual const Options& options() PURE; + virtual Api::Api& api() PURE; /** * @return information about the local environment the server is running in. @@ -67,10 +65,9 @@ class CommonFactoryContext { virtual const LocalInfo::LocalInfo& localInfo() const PURE; /** - * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration - * messages. + * @return Server::Admin& the server's global admin HTTP endpoint. */ - virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; + virtual Server::Admin& admin() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. @@ -78,47 +75,58 @@ class CommonFactoryContext { virtual Envoy::Runtime::Loader& runtime() PURE; /** - * @return Stats::Scope& the filter's stats scope. + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; + + /** + * @return ProtobufMessage::ValidationVisitor& validation visitor for configuration messages. + */ + virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; + + /** + * @return Stats::Scope& the context's stats scope. */ virtual Stats::Scope& scope() PURE; /** - * @return Singleton::Manager& the server-wide singleton manager. + * @return Stats::Scope& the server wide stats scope. 
*/ - virtual Singleton::Manager& singletonManager() PURE; + virtual Stats::Scope& serverScope() PURE; /** * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is * used to allow runtime lockless updates to configuration, etc. across multiple threads. */ virtual ThreadLocal::SlotAllocator& threadLocal() PURE; +}; +/** + * Common interface for downstream and upstream network filters. + */ +class CommonFactoryContext : public FactoryContextBase { +public: /** - * @return Server::Admin& the server's global admin HTTP endpoint. + * @return Upstream::ClusterManager& singleton for use by the entire server. */ - virtual Server::Admin& admin() PURE; + virtual Upstream::ClusterManager& clusterManager() PURE; /** - * @return TimeSource& a reference to the time source. + * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration + * messages. */ - virtual TimeSource& timeSource() PURE; + virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; /** - * @return Api::Api& a reference to the api object. + * @return TimeSource& a reference to the time source. */ - virtual Api::Api& api() PURE; + virtual TimeSource& timeSource() PURE; /** * @return AccessLogManager for use by the entire server. */ virtual AccessLog::AccessLogManager& accessLogManager() PURE; - /** - * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration - * messages. - */ - virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; - /** * @return ServerLifecycleNotifier& the lifecycle notifier for the server. */ diff --git a/envoy/server/health_checker_config.h b/envoy/server/health_checker_config.h index 00584f5176517..82f27123db8c3 100644 --- a/envoy/server/health_checker_config.h +++ b/envoy/server/health_checker_config.h @@ -28,7 +28,7 @@ class HealthCheckerFactoryContext { * @return Event::Dispatcher& the main thread's dispatcher. 
This dispatcher should be used * for all singleton processing. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /* * @return Upstream::HealthCheckEventLoggerPtr the health check event logger for the diff --git a/envoy/server/resource_monitor_config.h b/envoy/server/resource_monitor_config.h index 18e211e3801f1..9f680f44f8f2a 100644 --- a/envoy/server/resource_monitor_config.h +++ b/envoy/server/resource_monitor_config.h @@ -22,7 +22,7 @@ class ResourceMonitorFactoryContext { * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used * for all singleton processing. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /** * @return Server::Options& the command-line options that Envoy was started with. diff --git a/envoy/server/transport_socket_config.h b/envoy/server/transport_socket_config.h index 38308a9f1a642..703116ef250cc 100644 --- a/envoy/server/transport_socket_config.h +++ b/envoy/server/transport_socket_config.h @@ -62,7 +62,7 @@ class TransportSocketFactoryContext { /** * @return Event::Dispatcher& the main thread's dispatcher. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /** * @return Server::Options& the command-line options that Envoy was started with. diff --git a/envoy/singleton/manager.h b/envoy/singleton/manager.h index 995c333f87446..d2f235bdf0677 100644 --- a/envoy/singleton/manager.h +++ b/envoy/singleton/manager.h @@ -73,6 +73,15 @@ class Manager { return std::dynamic_pointer_cast(get(name, cb)); } + /** + * This is a non-constructing getter. Use when the caller can deal with instances where + * the singleton being accessed may not have been constructed previously. + * @return InstancePtr the singleton. nullptr if the singleton does not exist. 
+ */ + template std::shared_ptr getTyped(const std::string& name) { + return std::dynamic_pointer_cast(get(name, [] { return nullptr; })); + } + /** * Get a singleton and create it if it does not exist. * @param name supplies the singleton name. Must be registered via RegistrationImpl. diff --git a/envoy/stats/BUILD b/envoy/stats/BUILD index 0516c76b1552b..ae9b06419d568 100644 --- a/envoy/stats/BUILD +++ b/envoy/stats/BUILD @@ -78,3 +78,8 @@ envoy_cc_library( hdrs = ["primitive_stats_macros.h"], deps = [":primitive_stats_interface"], ) + +envoy_cc_library( + name = "custom_stat_namespaces_interface", + hdrs = ["custom_stat_namespaces.h"], +) diff --git a/envoy/stats/custom_stat_namespaces.h b/envoy/stats/custom_stat_namespaces.h new file mode 100644 index 0000000000000..175a80efe6b7a --- /dev/null +++ b/envoy/stats/custom_stat_namespaces.h @@ -0,0 +1,52 @@ +#pragma once + +#include "envoy/common/pure.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Stats { + +/** + * CustomStatNamespaces manages custom stat namespaces. Custom stat namespaces are registered + * by extensions that create user-defined metrics, and these metrics are all prefixed + * by the namespace. For example, Wasm extension registers "wasmcustom" as a custom stat namespace, + * and all the metrics created by user Wasm programs are prefixed by "wasmcustom." internally. + * This is mainly for distinguishing these "custom metrics" defined outside Envoy codebase from + * the native metrics defined by Envoy codebase, and this way stat sinks are able to determine + * how to expose these two kinds of metrics. + * Note that the implementation will not be thread-safe so users of this class must be in the main + * thread. + */ +class CustomStatNamespaces { +public: + virtual ~CustomStatNamespaces() = default; + + /** + * @param name is the name to check. + * @return true if the given name is registered as a custom stat namespace, false otherwise. 
+ */ + virtual bool registered(const absl::string_view name) const PURE; + + /** + * Used to register a custom namespace by extensions. + * @param name is the name to register. + */ + virtual void registerStatNamespace(const absl::string_view name) PURE; + + /** + * Strips the registered custom stat namespace from the given stat name's prefix if it lives in a + * registered custom stat namespace, and the stripped string is returned. Otherwise return + * nullopt. + * @param stat_name is the view to modify. If it is not in any custom registered namespaces, it + * will never be modified. + * @return the stripped string if stat_name has a registered custom stat namespace. Otherwise, + * return nullopt. + */ + virtual absl::optional + stripRegisteredPrefix(const absl::string_view stat_name) const PURE; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/envoy/stats/symbol_table.h b/envoy/stats/symbol_table.h index e7f171b6f8c91..e7fcb3526b487 100644 --- a/envoy/stats/symbol_table.h +++ b/envoy/stats/symbol_table.h @@ -56,7 +56,7 @@ class SymbolTable { * into the SymbolTable, which will not be optimal, but in practice appears * to be pretty good. * - * This is exposed in the interface for the benefit of join(), which which is + * This is exposed in the interface for the benefit of join(), which is * used in the hot-path to append two stat-names into a temp without taking * locks. This is used then in thread-local cache lookup, so that once warm, * no locks are taken when looking up stats. @@ -128,7 +128,7 @@ class SymbolTable { * * @param names A pointer to the first name in an array, allocated by the caller. * @param num_names The number of names. - * @param symbol_table The symbol table in which to encode the names. + * @param list The StatNameList representing the stat names. 
*/ virtual void populateList(const StatName* names, uint32_t num_names, StatNameList& list) PURE; diff --git a/envoy/stream_info/stream_info.h b/envoy/stream_info/stream_info.h index 8d01eca502e83..1a48d18e2c949 100644 --- a/envoy/stream_info/stream_info.h +++ b/envoy/stream_info/stream_info.h @@ -158,6 +158,8 @@ struct ResponseCodeDetailValues { const std::string UpstreamTimeout = "upstream_response_timeout"; // The final upstream try timed out. const std::string UpstreamPerTryTimeout = "upstream_per_try_timeout"; + // The final upstream try idle timed out. + const std::string UpstreamPerTryIdleTimeout = "upstream_per_try_idle_timeout"; // The request was destroyed because of user defined max stream duration. const std::string UpstreamMaxStreamDurationReached = "upstream_max_stream_duration_reached"; // The upstream connection was reset before a response was started. This diff --git a/envoy/upstream/cluster_factory.h b/envoy/upstream/cluster_factory.h index 6196cef791576..a6f5d70e61320 100644 --- a/envoy/upstream/cluster_factory.h +++ b/envoy/upstream/cluster_factory.h @@ -18,6 +18,7 @@ #include "envoy/network/dns.h" #include "envoy/runtime/runtime.h" #include "envoy/server/admin.h" +#include "envoy/server/factory_context.h" #include "envoy/server/options.h" #include "envoy/singleton/manager.h" #include "envoy/ssl/context.h" @@ -35,66 +36,28 @@ namespace Upstream { * Context passed to cluster factory to access envoy resources. Cluster factory should only access * the rest of the server through this context object. */ -class ClusterFactoryContext { +class ClusterFactoryContext : public Server::Configuration::FactoryContextBase { public: - virtual ~ClusterFactoryContext() = default; - /** * @return bool flag indicating whether the cluster is added via api. */ virtual bool addedViaApi() PURE; - /** - * @return Server::Admin& the server's admin interface. - */ - virtual Server::Admin& admin() PURE; - - /** - * @return Api::Api& a reference to the api object. 
- */ - virtual Api::Api& api() PURE; - /** * @return Upstream::ClusterManager& singleton for use by the entire server. */ - virtual ClusterManager& clusterManager() PURE; - - /** - * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used - * for all singleton processing. - */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Upstream::ClusterManager& clusterManager() PURE; /** * @return Network::DnsResolverSharedPtr the dns resolver for the server. */ virtual Network::DnsResolverSharedPtr dnsResolver() PURE; - /** - * @return information about the local environment the server is running in. - */ - virtual const LocalInfo::LocalInfo& localInfo() PURE; - - /** - * @return Server::Options& the command-line options that Envoy was started with. - */ - virtual const Server::Options& options() PURE; - /** * @return AccessLogManager for use by the entire server. */ virtual AccessLog::AccessLogManager& logManager() PURE; - /** - * @return Runtime::Loader& the singleton runtime loader for the server. - */ - virtual Runtime::Loader& runtime() PURE; - - /** - * @return Singleton::Manager& the server-wide singleton manager. - */ - virtual Singleton::Manager& singletonManager() PURE; - /** * @return Ssl::ContextManager& the SSL context manager. */ @@ -105,21 +68,15 @@ class ClusterFactoryContext { */ virtual Stats::Store& stats() PURE; - /** - * @return the server's TLS slot allocator. - */ - virtual ThreadLocal::SlotAllocator& tls() PURE; - /** * @return Outlier::EventLoggerSharedPtr sink for outlier detection event logs. */ virtual Outlier::EventLoggerSharedPtr outlierEventLogger() PURE; - /** - * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration - * messages. 
- */ - virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; + // Server::Configuration::FactoryContextBase + Stats::Scope& scope() override { return stats(); } + + Stats::Scope& serverScope() override { return stats(); } }; /** diff --git a/envoy/upstream/cluster_manager.h b/envoy/upstream/cluster_manager.h index 20ca0acca4df0..e24790038d241 100644 --- a/envoy/upstream/cluster_manager.h +++ b/envoy/upstream/cluster_manager.h @@ -321,6 +321,12 @@ class ClusterManager { * Drain all connection pool connections owned by all clusters in the cluster manager. */ virtual void drainConnections() PURE; + + /** + * Check if the cluster is active and statically configured, and if not, throw exception. + * @param cluster, the cluster to check. + */ + virtual void checkActiveStaticCluster(const std::string& cluster) PURE; }; using ClusterManagerPtr = std::unique_ptr; @@ -406,6 +412,11 @@ class ClusterManagerFactory { * Returns the secret manager. */ virtual Secret::SecretManager& secretManager() PURE; + + /** + * Returns the singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; }; /** diff --git a/envoy/upstream/retry.h b/envoy/upstream/retry.h index f772d54029179..9e1a8de57995f 100644 --- a/envoy/upstream/retry.h +++ b/envoy/upstream/retry.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/typed_config.h" +#include "envoy/singleton/manager.h" #include "envoy/upstream/types.h" #include "envoy/upstream/upstream.h" @@ -92,13 +93,58 @@ class RetryHostPredicate { using RetryHostPredicateSharedPtr = std::shared_ptr; +/** + * A predicate that is applied prior to retrying a request. Each predicate can customize request + * behavior prior to the request being retried. + */ +class RetryOptionsPredicate { +public: + struct UpdateOptionsParameters { + // Stream info for the previous request attempt that is about to be retried. 
+ const StreamInfo::StreamInfo& retriable_request_stream_info_; + // The current upstream socket options that were used for connection pool selection on the + // previous attempt, or the result of an updated set of options from a previously run + // retry options predicate. + Network::Socket::OptionsSharedPtr current_upstream_socket_options_; + }; + + struct UpdateOptionsReturn { + // New upstream socket options to apply to the next request attempt. If changed, will affect + // connection pool selection similar to that which was done for the initial request. + absl::optional new_upstream_socket_options_; + }; + + virtual ~RetryOptionsPredicate() = default; + + /** + * Update request options. + * @param parameters supplies the update parameters. + * @return the new options to apply. Each option is wrapped in an optional and is only applied + * if valid. + */ + virtual UpdateOptionsReturn updateOptions(const UpdateOptionsParameters& parameters) const PURE; +}; + +using RetryOptionsPredicateConstSharedPtr = std::shared_ptr; + +/** + * Context for all retry extensions. + */ +class RetryExtensionFactoryContext { +public: + virtual ~RetryExtensionFactoryContext() = default; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; +}; + /** * Factory for RetryPriority. 
*/ class RetryPriorityFactory : public Config::TypedFactory { public: - ~RetryPriorityFactory() override = default; - virtual RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message& config, ProtobufMessage::ValidationVisitor& validation_visitor, @@ -112,13 +158,23 @@ class RetryPriorityFactory : public Config::TypedFactory { */ class RetryHostPredicateFactory : public Config::TypedFactory { public: - ~RetryHostPredicateFactory() override = default; - virtual RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config, uint32_t retry_count) PURE; std::string category() const override { return "envoy.retry_host_predicates"; } }; +/** + * Factory for RetryOptionsPredicate. + */ +class RetryOptionsPredicateFactory : public Config::TypedFactory { +public: + virtual RetryOptionsPredicateConstSharedPtr + createOptionsPredicate(const Protobuf::Message& config, + RetryExtensionFactoryContext& context) PURE; + + std::string category() const override { return "envoy.retry_options_predicates"; } +}; + } // namespace Upstream } // namespace Envoy diff --git a/envoy/upstream/upstream.h b/envoy/upstream/upstream.h index 30bf5d8bb211c..732e48ef5f461 100644 --- a/envoy/upstream/upstream.h +++ b/envoy/upstream/upstream.h @@ -575,6 +575,7 @@ class PrioritySet { COUNTER(upstream_rq_pending_overflow) \ COUNTER(upstream_rq_pending_total) \ COUNTER(upstream_rq_per_try_timeout) \ + COUNTER(upstream_rq_per_try_idle_timeout) \ COUNTER(upstream_rq_retry) \ COUNTER(upstream_rq_retry_backoff_exponential) \ COUNTER(upstream_rq_retry_backoff_ratelimited) \ @@ -834,6 +835,12 @@ class ClusterInfo { virtual const absl::optional& clusterType() const PURE; + /** + * @return configuration for round robin load balancing, only used if LB type is round robin. + */ + virtual const absl::optional& + lbRoundRobinConfig() const PURE; + /** * @return configuration for least request load balancing, only used if LB type is least request. 
*/ diff --git a/examples/cache/front-envoy.yaml b/examples/cache/front-envoy.yaml index e6111b745eeca..4754cbeebfc8c 100644 --- a/examples/cache/front-envoy.yaml +++ b/examples/cache/front-envoy.yaml @@ -29,9 +29,9 @@ static_resources: http_filters: - name: "envoy.filters.http.cache" typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3.CacheConfig" typed_config: - "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig" + "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3.SimpleHttpCacheConfig" - name: envoy.filters.http.router clusters: diff --git a/examples/dynamic-config-fs/configs/lds.yaml b/examples/dynamic-config-fs/configs/lds.yaml index 4770f538ac25f..a0b8ba1fa6fa9 100644 --- a/examples/dynamic-config-fs/configs/lds.yaml +++ b/examples/dynamic-config-fs/configs/lds.yaml @@ -7,12 +7,12 @@ resources: port_value: 10000 filter_chains: - filters: - name: envoy.http_connection_manager + name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http http_filters: - - name: envoy.router + - name: envoy.filters.http.router route_config: name: local_route virtual_hosts: diff --git a/examples/grpc-bridge/client/Dockerfile b/examples/grpc-bridge/client/Dockerfile index 49425a773bc0b..6159167e8e8d7 100644 --- a/examples/grpc-bridge/client/Dockerfile +++ b/examples/grpc-bridge/client/Dockerfile @@ -1,11 +1,11 @@ -FROM grpc/python +FROM python:3.8-slim WORKDIR /client COPY requirements.txt /client/requirements.txt # Cache the dependencies -RUN pip install -r /client/requirements.txt +RUN pip install --require-hashes -r /client/requirements.txt # Copy the sources, including the stubs COPY client.py /client/grpc-kv-client.py diff --git 
a/examples/grpc-bridge/client/client.py b/examples/grpc-bridge/client/client.py index 8bcf29f22cbab..e98607cbcfcad 100755 --- a/examples/grpc-bridge/client/client.py +++ b/examples/grpc-bridge/client/client.py @@ -19,7 +19,7 @@ """.format(host=HOST) -class KVClient(): +class KVClient: def get(self, key): r = kv.GetRequest(key=key) @@ -40,7 +40,7 @@ def set(self, key, value): return requests.post(HOST + "/kv.KV/Set", data=data, headers=HEADERS) -def run(): +def main(): if len(sys.argv) == 1: print(USAGE) @@ -82,4 +82,4 @@ def run(): if __name__ == '__main__': - run() + main() diff --git a/examples/grpc-bridge/client/requirements.in b/examples/grpc-bridge/client/requirements.in new file mode 100644 index 0000000000000..96b06d428c7ef --- /dev/null +++ b/examples/grpc-bridge/client/requirements.in @@ -0,0 +1,4 @@ +requests>=2.22.0 +grpcio +grpcio-tools +protobuf>=3.18.0 diff --git a/examples/grpc-bridge/client/requirements.txt b/examples/grpc-bridge/client/requirements.txt index c6c2fd2fa343c..e8e3d07215473 100644 --- a/examples/grpc-bridge/client/requirements.txt +++ b/examples/grpc-bridge/client/requirements.txt @@ -1,4 +1,155 @@ -requests>=2.22.0 -grpcio -grpcio-tools -protobuf==3.17.3 +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile --allow-unsafe --generate-hashes requirements.in +# +certifi==2021.5.30 \ + --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ + --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 + # via requests +charset-normalizer==2.0.6 \ + --hash=sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6 \ + --hash=sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f + # via requests +grpcio-tools==1.41.0 \ + --hash=sha256:022ea466300fd8eee03375795c764b8d01aee7ba614c1d7ba198eef9eaebc07a \ + --hash=sha256:05730f1acd3fa70e63a62fe37377297774db7f4794fb6ae3e43f64aa354460f8 \ + 
--hash=sha256:08654c9f723fa644be52cc8f975c01bb93a99808ab02c2e64a20e9c9e92c9a3b \ + --hash=sha256:0d6489ed1310250f152d6170ee539e84bfc364bbfdffbbe98e8ce9297c4a1550 \ + --hash=sha256:17a759203f627b941086a65a0c3f39c5da41f11d11dc8ca5883e844c055876dd \ + --hash=sha256:2d48309bbbb2d7144117748718ca52eb60f10dd86a0cb8a0a5f952ee08575bee \ + --hash=sha256:3891b1df82369acbc8451d4952cd20755f49a82398dce62437511ad17b47290e \ + --hash=sha256:3c7f6c8559ac6bea6029b8c5d188d24509d30a28816de02c723659f56e862b98 \ + --hash=sha256:3f6c2bff12e2015bd69c600710fb427720446034ed9a237cd6edf7e2452cf826 \ + --hash=sha256:3f860f8a804f6ef6ea545483c1506d184f9bba40f635c6886d79791822c679e3 \ + --hash=sha256:4b48c13dbbf96d36a41e45fd011eeabc1541ec8705f2d533fa4c20634f750885 \ + --hash=sha256:50a9f66502e4868c20bc0b8c1c7d3b21e6b6b2578a7aef6ce7c28294b9eba911 \ + --hash=sha256:51bdc4bd088592d5f52b5cb6d3be072bf0d847a7af92e544f9885acdf5de1252 \ + --hash=sha256:55915c61baae316b607be6ff5be72614efc067e50dfffd389bde95c240a5416e \ + --hash=sha256:57f35fd71366f1eecd4c08b9d8eda1007d371827f092ae916b4235744e9175a6 \ + --hash=sha256:5b1edfcfa4f21c210bfe66534af9fa5ca37374bb0e0d1754018e0d92c8fe4c8e \ + --hash=sha256:5d15f5dd0c01f914ab15e921484b71aff0eff8aa123b22d76e71c76be8d81efc \ + --hash=sha256:5f52f7d8841372a047493ee9722810856a4adfa38330b4a688a1421dd3460518 \ + --hash=sha256:5f85be3053486cc53b41fe888957f61e98d6aab74b0726a54cf35e4a685f2b96 \ + --hash=sha256:602b7dd5e52924794f19f637ec042bc141b7d9dd127ddc662b28c42f8db08e95 \ + --hash=sha256:609f6e4cad800f0b2caa0b46baefbb30444bddfc94d1429b9add02d5e6759001 \ + --hash=sha256:6622feec0a3f326fb86cf01bf1bcbfec23548ae4d80706d88b296d792d816f0e \ + --hash=sha256:7145e9243718bd8a4792547efb1443846cebb3d36d49dca52d5f9edfb81aa256 \ + --hash=sha256:7242b39d16970319b11c13832f3474d09be53cbc88bc05c54140f5394a247184 \ + --hash=sha256:731c78b612ca672af0f4682e68d331d304a3eccd1836f0b89402c332aa653815 \ + --hash=sha256:7f3bf213d7b182628bdfb10854cc7b19d4882e1916786fc3a14f724555a7e824 \ + 
--hash=sha256:85b4cd4a77c27df984dce5b14eafa29c54abd134335230b59fa8d096c995b877 \ + --hash=sha256:898b032ddcd25a051c6c8892b76779b8821e073fc363e6105dc08efd95857bcd \ + --hash=sha256:8cf6ab58c14b7bd4cf5b4d652e2bfafc6543d38210d68332ccccff4733bcc615 \ + --hash=sha256:8f7cd5b8eeae570743cfd0ece36f62b32424b995ee3862697cfe94bc9c4fa5fe \ + --hash=sha256:98d9e581bc9ad154697af40c0109221926628d57fab2a52a1fa2cfed401349d5 \ + --hash=sha256:9ff9fdef6df6b3d1e4395158f4bd2bfab58867370bd4b4ed81a1a2ab20de085b \ + --hash=sha256:a111af9732c1ac85b35b894c4b6150127c52349ca220c0708d241d4bb8ee4622 \ + --hash=sha256:a1e2db4c90cb07d6b8f1526346df65da85dce995e7aa7c4db76bcc2a99dcbf43 \ + --hash=sha256:a4e08366f780b439499645fbb0b7788cccd978c06158b19e915726bfbe420031 \ + --hash=sha256:b78a3225302b60e59a922d909413b2c0de2ba19f4dc79273411dfad560e21418 \ + --hash=sha256:b8e9181327b94886f6214cfe2147721c6b60138c111d78313b9070f4068020b5 \ + --hash=sha256:c13b6a37fe3619be603265a14a614f86fa97a95934e6447de2bc9e66f9a35590 \ + --hash=sha256:c93137598d5f2b4d163aff571197be92d3c691a5d82dabb29b1ef467e3c29db6 \ + --hash=sha256:ceefaa88c066c9c779f15e8d58d57d3763efef3d0dbec483be99bc75ae0e2d70 \ + --hash=sha256:db64aa08ae500cb20c9f377e41a66e493c4cba27ab99710852340ef81c7d0e30 \ + --hash=sha256:dc65beee944735d4cb42c8c43e284ff711512d1f7a029bdbaeb0729243f3a702 \ + --hash=sha256:e1814b98a955aad08107eb4c4f068b1cd147cc923a2480bc2fae51007bb7866b \ + --hash=sha256:f4c03f312877e57b47beda2e9db5a39bc3af65ee22b38e85b4c0f94b3b9c26af + # via -r requirements.in +grpcio==1.41.0 \ + --hash=sha256:056806e83eaa09d0af0e452dd353db8f7c90aa2dedcce1112a2d21592550f6b1 \ + --hash=sha256:07594e585a5ba25cf331ddb63095ca51010c34e328a822cb772ffbd5daa62cb5 \ + --hash=sha256:0abd56d90dff3ed566807520de1385126dded21e62d3490a34c180a91f94c1f4 \ + --hash=sha256:15c04d695833c739dbb25c88eaf6abd9a461ec0dbd32f44bc8769335a495cf5a \ + --hash=sha256:1820845e7e6410240eff97742e9f76cd5bf10ca01d36a322e86c0bd5340ac25b \ + 
--hash=sha256:1bcbeac764bbae329bc2cc9e95d0f4d3b0fb456b92cf12e7e06e3e860a4b31cf \ + --hash=sha256:2410000eb57cf76b05b37d2aee270b686f0a7876710850a2bba92b4ed133e026 \ + --hash=sha256:2882b62f74de8c8a4f7b2be066f6230ecc46f4edc8f42db1fb7358200abe3b25 \ + --hash=sha256:297ee755d3c6cd7e7d3770f298f4d4d4b000665943ae6d2888f7407418a9a510 \ + --hash=sha256:39ce785f0cbd07966a9019386b7a054615b2da63da3c7727f371304d000a1890 \ + --hash=sha256:3a92e4df5330cd384984e04804104ae34f521345917813aa86fc0930101a3697 \ + --hash=sha256:3bbeee115b05b22f6a9fa9bc78f9ab8d9d6bb8c16fdfc60401fc8658beae1099 \ + --hash=sha256:4537bb9e35af62c5189493792a8c34d127275a6d175c8ad48b6314cacba4021e \ + --hash=sha256:462178987f0e5c60d6d1b79e4e95803a4cd789db961d6b3f087245906bb5ae04 \ + --hash=sha256:5292a627b44b6d3065de4a364ead23bab3c9d7a7c05416a9de0c0624d0fe03f4 \ + --hash=sha256:5502832b7cec670a880764f51a335a19b10ff5ab2e940e1ded67f39b88aa02b1 \ + --hash=sha256:585847ed190ea9cb4d632eb0ebf58f1d299bbca5e03284bc3d0fa08bab6ea365 \ + --hash=sha256:59645b2d9f19b5ff30cb46ddbcaa09c398f9cd81e4e476b21c7c55ae1e942807 \ + --hash=sha256:5d4b30d068b022e412adcf9b14c0d9bcbc872e9745b91467edc0a4c700a8bba6 \ + --hash=sha256:7033199706526e7ee06a362e38476dfdf2ddbad625c19b67ed30411d1bb25a18 \ + --hash=sha256:7b07cbbd4eea56738e995fcbba3b60e41fd9aa9dac937fb7985c5dcbc7626260 \ + --hash=sha256:7da3f6f6b857399c9ad85bcbffc83189e547a0a1a777ab68f5385154f8bc1ed4 \ + --hash=sha256:83c1e731c2b76f26689ad88534cafefe105dcf385567bead08f5857cb308246b \ + --hash=sha256:9674a9d3f23702e35a89e22504f41b467893cf704f627cc9cdd118cf1dcc8e26 \ + --hash=sha256:9ecd0fc34aa46eeac24f4d20e67bafaf72ca914f99690bf2898674905eaddaf9 \ + --hash=sha256:a0c4bdd1d646365d10ba1468bcf234ea5ad46e8ce2b115983e8563248614910a \ + --hash=sha256:a144f6cecbb61aace12e5920840338a3d246123a41d795e316e2792e9775ad15 \ + --hash=sha256:a3cd7f945d3e3b82ebd2a4c9862eb9891a5ac87f84a7db336acbeafd86e6c402 \ + --hash=sha256:a614224719579044bd7950554d3b4c1793bb5715cbf0f0399b1f21d283c40ef6 \ + 
--hash=sha256:ace080a9c3c673c42adfd2116875a63fec9613797be01a6105acf7721ed0c693 \ + --hash=sha256:b2de4e7b5a930be04a4d05c9f5fce7e9191217ccdc174b026c2a7928770dca9f \ + --hash=sha256:b6b68c444abbaf4a2b944a61cf35726ab9645f45d416bcc7cf4addc4b2f2d53d \ + --hash=sha256:be3c6ac822edb509aeef41361ca9c8c5ee52cb9e4973e1977d2bb7d6a460fd97 \ + --hash=sha256:c07acd49541f5f6f9984fe0adf162d77bf70e0f58e77f9960c6f571314ff63a4 \ + --hash=sha256:c1e0a4c86d4cbd93059d5eeceed6e1c2e3e1494e1bf40be9b8ab14302c576162 \ + --hash=sha256:c8c5bc498f6506b6041c30afb7a55c57a9fd535d1a0ac7cdba9b5fd791a85633 \ + --hash=sha256:c95dd6e60e059ff770a2ac9f5a202b75dd64d76b0cd0c48f27d58907e43ed6a6 \ + --hash=sha256:ccd2f1cf11768d1f6fbe4e13e8b8fb0ccfe9914ceeff55a367d5571e82eeb543 \ + --hash=sha256:d0cc0393744ce3ce1b237ae773635cc928470ff46fb0d3f677e337a38e5ed4f6 \ + --hash=sha256:d539ebd05a2bbfbf897d41738d37d162d5c3d9f2b1f8ddf2c4f75e2c9cf59907 \ + --hash=sha256:d71aa430b2ac40e18e388504ac34cc91d49d811855ca507c463a21059bf364f0 \ + --hash=sha256:dcb5f324712a104aca4a459e524e535f205f36deb8005feb4f9d3ff0a22b5177 \ + --hash=sha256:e516124010ef60d5fc2e0de0f1f987599249dc55fd529001f17f776a4145767f \ + --hash=sha256:fb64abf0d92134cb0ba4496a3b7ab918588eee42de20e5b3507fe6ee16db97ee + # via + # -r requirements.in + # grpcio-tools +idna==3.2 \ + --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ + --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 + # via requests +protobuf==3.18.0 \ + --hash=sha256:0a59ea8da307118372750e2fdfe0961622e675b8dd35e05c42384d618189a938 \ + --hash=sha256:17181fc0814655812aac108e755bd5185d71aa8d81bd241cec6e232c84097918 \ + --hash=sha256:18b308946a592e245299391e53c01b5b8efc2794f49986e80f37d7b5e60a270f \ + --hash=sha256:1f3ecec3038c2fb4dad952d3d6cb9ca301999903a09e43794fb348da48f7577f \ + --hash=sha256:3b5b81bb665aac548b413480f4e0d8c38a74bc4dea57835f288a3ce74f63dfe9 \ + --hash=sha256:42c04e66ec5a38ad2171639dc9860c2f9594668f709ea3a4a192acf7346853a7 
\ + --hash=sha256:5201333b7aa711965c5769b250f8565a9924e8e27f8b622bbc5e6847aeaab1b1 \ + --hash=sha256:568c049ff002a7523ed33fb612e6b97da002bf87ffb619a1fc3eadf2257a3b31 \ + --hash=sha256:5730de255c95b3403eedd1a568eb28203b913b6192ff5a3fdc3ff30f37107a38 \ + --hash=sha256:615099e52e9fbc9fde00177267a94ca820ecf4e80093e390753568b7d8cb3c1a \ + --hash=sha256:7646c20605fbee57e77fdbc4a90175538281b152f46ba17019916593f8062c2a \ + --hash=sha256:7e791a94db391ae22b3943fc88f6ba0e1f62b6ad58b33db7517df576c7834d23 \ + --hash=sha256:80b0a5157f3a53043daf8eb7cfa1220b27a5a63dd6655dbd8e1e6f7b5dcd6347 \ + --hash=sha256:877664b1b8d1e23553634f625e4e12aae4ff16cbbef473f8118c239d478f422a \ + --hash=sha256:9072cb18fca8998b77f969fb74d25a11d7f4a39a8b1ddc3cf76cd5abda8499cb \ + --hash=sha256:9147565f93e6699d7512747766598afe63205f226ac7b61f47954974c9aab852 \ + --hash=sha256:93c077fd83879cf48f327a2491c24da447a09da6a7ab3cc311a6f5a61fcb5de0 \ + --hash=sha256:d11465040cadcea8ecf5f0b131af5099a9696f9d0bef6f88148b372bacc1c52d \ + --hash=sha256:f589346b5b3f702c1d30e2343c9897e6c35e7bd495c10a0e17d11ecb5ee5bd06 \ + --hash=sha256:f6138462643adce0ed6e49007a63b7fd7dc4fda1ef4e15a70fcebe76c1407a71 \ + --hash=sha256:f7c8193ec805324ff6024242b00f64a24b94d56b895f62bf28a9d72a228d4fca + # via + # -r requirements.in + # grpcio-tools +requests==2.26.0 \ + --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ + --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 + # via -r requirements.in +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via grpcio +urllib3==1.26.7 \ + --hash=sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece \ + --hash=sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844 + # via requests + +# The following packages are considered to be unsafe in a requirements file: +setuptools==58.2.0 \ 
+ --hash=sha256:2551203ae6955b9876741a26ab3e767bb3242dafe86a32a749ea0d78b6792f11 \ + --hash=sha256:2c55bdb85d5bb460bd2e3b12052b677879cffcf46c0c688f2e5bf51d36001145 + # via grpcio-tools diff --git a/examples/wasm-cc/docker-compose-wasm.yaml b/examples/wasm-cc/docker-compose-wasm.yaml index 072928843f428..537b46d2485e4 100644 --- a/examples/wasm-cc/docker-compose-wasm.yaml +++ b/examples/wasm-cc/docker-compose-wasm.yaml @@ -2,7 +2,7 @@ version: "3.7" services: wasm_compile_update: - image: envoyproxy/envoy-build-ubuntu:55d9e4719d2bd0accce8f829b44dab70cd42112a + image: envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6 command: | bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_updated_example.wasm && cp -a bazel-bin/examples/wasm-cc/* /build" working_dir: /source @@ -11,7 +11,7 @@ services: - ./lib:/build wasm_compile: - image: envoyproxy/envoy-build-ubuntu:55d9e4719d2bd0accce8f829b44dab70cd42112a + image: envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6 command: | bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_example.wasm && cp -a bazel-bin/examples/wasm-cc/* /build" working_dir: /source diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD deleted file mode 100644 index 93f9184a2b400..0000000000000 --- a/generated_api_shadow/BUILD +++ /dev/null @@ -1,268 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "v2_protos", - visibility = ["//visibility:public"], - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/api/v2/ratelimit:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/config/filter/network/http_connection_manager/v2:pkg", - "//envoy/config/filter/network/redis_proxy/v2:pkg", - "//envoy/config/filter/network/tcp_proxy/v2:pkg", - "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/health_checker/redis/v2:pkg", - "//envoy/config/listener/v2:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "//envoy/config/retry/previous_hosts/v2:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/config/trace/v2alpha:pkg", - "//envoy/config/transport_socket/alts/v2alpha:pkg", - "//envoy/data/accesslog/v2:pkg", - "//envoy/data/tap/v2alpha:pkg", - "//envoy/service/accesslog/v2:pkg", - "//envoy/service/auth/v2:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/load_stats/v2:pkg", - "//envoy/service/metrics/v2:pkg", - "//envoy/service/ratelimit/v2:pkg", - "//envoy/service/status/v2:pkg", - "//envoy/service/tap/v2alpha:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/metadata/v2:pkg", - "//envoy/type/tracing/v2:pkg", - ], -) - -proto_library( - name = "v3_protos", - visibility = ["//visibility:public"], - deps = [ - 
"//contrib/envoy/extensions/filters/http/squash/v3:pkg", - "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", - "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", - "//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg", - "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", - "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", - "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", - "//envoy/admin/v3:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/bootstrap/v3:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/grpc_credential/v3:pkg", - "//envoy/config/health_checker/redis/v2:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", - "//envoy/config/overload/v3:pkg", - "//envoy/config/ratelimit/v3:pkg", - "//envoy/config/rbac/v3:pkg", - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "//envoy/config/retry/previous_hosts/v2:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/config/tap/v3:pkg", - "//envoy/config/trace/v3:pkg", - "//envoy/data/accesslog/v3:pkg", - "//envoy/data/cluster/v3:pkg", - "//envoy/data/core/v3:pkg", - "//envoy/data/dns/v3:pkg", - "//envoy/data/tap/v3:pkg", - "//envoy/extensions/access_loggers/file/v3:pkg", - "//envoy/extensions/access_loggers/grpc/v3:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", - "//envoy/extensions/access_loggers/stream/v3:pkg", - "//envoy/extensions/access_loggers/wasm/v3:pkg", - "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg", - "//envoy/extensions/clusters/aggregate/v3:pkg", - "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", - 
"//envoy/extensions/clusters/redis/v3:pkg", - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", - "//envoy/extensions/common/matching/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/extensions/common/tap/v3:pkg", - "//envoy/extensions/compression/brotli/compressor/v3:pkg", - "//envoy/extensions/compression/brotli/decompressor/v3:pkg", - "//envoy/extensions/compression/gzip/compressor/v3:pkg", - "//envoy/extensions/compression/gzip/decompressor/v3:pkg", - "//envoy/extensions/filters/common/dependency/v3:pkg", - "//envoy/extensions/filters/common/fault/v3:pkg", - "//envoy/extensions/filters/common/matcher/action/v3:pkg", - "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", - "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", - "//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg", - "//envoy/extensions/filters/http/aws_lambda/v3:pkg", - "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", - "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg", - "//envoy/extensions/filters/http/buffer/v3:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", - "//envoy/extensions/filters/http/composite/v3:pkg", - "//envoy/extensions/filters/http/compressor/v3:pkg", - "//envoy/extensions/filters/http/cors/v3:pkg", - "//envoy/extensions/filters/http/csrf/v3:pkg", - "//envoy/extensions/filters/http/decompressor/v3:pkg", - "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/filters/http/dynamo/v3:pkg", - "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", - "//envoy/extensions/filters/http/fault/v3:pkg", - "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", - "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", - "//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg", - 
"//envoy/extensions/filters/http/grpc_stats/v3:pkg", - "//envoy/extensions/filters/http/grpc_web/v3:pkg", - "//envoy/extensions/filters/http/gzip/v3:pkg", - "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", - "//envoy/extensions/filters/http/health_check/v3:pkg", - "//envoy/extensions/filters/http/ip_tagging/v3:pkg", - "//envoy/extensions/filters/http/jwt_authn/v3:pkg", - "//envoy/extensions/filters/http/kill_request/v3:pkg", - "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", - "//envoy/extensions/filters/http/lua/v3:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", - "//envoy/extensions/filters/http/on_demand/v3:pkg", - "//envoy/extensions/filters/http/original_src/v3:pkg", - "//envoy/extensions/filters/http/ratelimit/v3:pkg", - "//envoy/extensions/filters/http/rbac/v3:pkg", - "//envoy/extensions/filters/http/router/v3:pkg", - "//envoy/extensions/filters/http/set_metadata/v3:pkg", - "//envoy/extensions/filters/http/tap/v3:pkg", - "//envoy/extensions/filters/http/wasm/v3:pkg", - "//envoy/extensions/filters/listener/http_inspector/v3:pkg", - "//envoy/extensions/filters/listener/original_dst/v3:pkg", - "//envoy/extensions/filters/listener/original_src/v3:pkg", - "//envoy/extensions/filters/listener/proxy_protocol/v3:pkg", - "//envoy/extensions/filters/listener/tls_inspector/v3:pkg", - "//envoy/extensions/filters/network/client_ssl_auth/v3:pkg", - "//envoy/extensions/filters/network/connection_limit/v3:pkg", - "//envoy/extensions/filters/network/direct_response/v3:pkg", - "//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg", - "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", - "//envoy/extensions/filters/network/echo/v3:pkg", - "//envoy/extensions/filters/network/ext_authz/v3:pkg", - "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", - "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", - 
"//envoy/extensions/filters/network/ratelimit/v3:pkg", - "//envoy/extensions/filters/network/rbac/v3:pkg", - "//envoy/extensions/filters/network/redis_proxy/v3:pkg", - "//envoy/extensions/filters/network/sni_cluster/v3:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", - "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", - "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", - "//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg", - "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", - "//envoy/extensions/filters/network/wasm/v3:pkg", - "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", - "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", - "//envoy/extensions/formatter/metadata/v3:pkg", - "//envoy/extensions/formatter/req_without_query/v3:pkg", - "//envoy/extensions/health_checkers/redis/v3:pkg", - "//envoy/extensions/http/header_formatters/preserve_case/v3:pkg", - "//envoy/extensions/http/original_ip_detection/custom_header/v3:pkg", - "//envoy/extensions/http/original_ip_detection/xff/v3:pkg", - "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", - "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", - "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", - "//envoy/extensions/key_value/file_based/v3:pkg", - "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", - "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", - "//envoy/extensions/matching/input_matchers/ip/v3:pkg", - "//envoy/extensions/network/socket_interface/v3:pkg", - "//envoy/extensions/quic/crypto_stream/v3:pkg", - "//envoy/extensions/quic/proof_source/v3:pkg", - "//envoy/extensions/rate_limit_descriptors/expr/v3:pkg", - "//envoy/extensions/request_id/uuid/v3:pkg", - "//envoy/extensions/resource_monitors/fixed_heap/v3:pkg", - 
"//envoy/extensions/resource_monitors/injected_resource/v3:pkg", - "//envoy/extensions/retry/host/omit_canary_hosts/v3:pkg", - "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", - "//envoy/extensions/retry/host/previous_hosts/v3:pkg", - "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", - "//envoy/extensions/stat_sinks/graphite_statsd/v3:pkg", - "//envoy/extensions/stat_sinks/wasm/v3:pkg", - "//envoy/extensions/transport_sockets/alts/v3:pkg", - "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", - "//envoy/extensions/transport_sockets/quic/v3:pkg", - "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg", - "//envoy/extensions/transport_sockets/starttls/v3:pkg", - "//envoy/extensions/transport_sockets/tap/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/extensions/upstreams/http/generic/v3:pkg", - "//envoy/extensions/upstreams/http/http/v3:pkg", - "//envoy/extensions/upstreams/http/tcp/v3:pkg", - "//envoy/extensions/upstreams/http/v3:pkg", - "//envoy/extensions/upstreams/tcp/generic/v3:pkg", - "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", - "//envoy/service/accesslog/v3:pkg", - "//envoy/service/auth/v3:pkg", - "//envoy/service/cluster/v3:pkg", - "//envoy/service/discovery/v3:pkg", - "//envoy/service/endpoint/v3:pkg", - "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/ext_proc/v3alpha:pkg", - "//envoy/service/extension/v3:pkg", - "//envoy/service/health/v3:pkg", - "//envoy/service/listener/v3:pkg", - "//envoy/service/load_stats/v3:pkg", - "//envoy/service/metrics/v3:pkg", - "//envoy/service/ratelimit/v3:pkg", - "//envoy/service/route/v3:pkg", - "//envoy/service/runtime/v3:pkg", - "//envoy/service/secret/v3:pkg", - "//envoy/service/status/v3:pkg", - "//envoy/service/tap/v3:pkg", - "//envoy/service/trace/v3:pkg", - "//envoy/type/http/v3:pkg", - "//envoy/type/matcher/v3:pkg", - 
"//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "//envoy/watchdog/v3alpha:pkg", - ], -) - -proto_library( - name = "all_protos", - visibility = ["//visibility:public"], - deps = [ - ":v2_protos", - ":v3_protos", - ], -) - -filegroup( - name = "proto_breaking_change_detector_buf_config", - srcs = [ - "buf.yaml", - ], - visibility = ["//visibility:public"], -) diff --git a/generated_api_shadow/README.md b/generated_api_shadow/README.md deleted file mode 100644 index 04633c218a7c4..0000000000000 --- a/generated_api_shadow/README.md +++ /dev/null @@ -1,6 +0,0 @@ -This directory is for generated Envoy internal artifacts (via `proto_format`). - -Do not hand edit any file under `envoy/`. This shadow API may only be used in -the Envoy source tree. - -The `bazel/` tree is a symlink back to the official API Bazel rules. diff --git a/generated_api_shadow/bazel/BUILD b/generated_api_shadow/bazel/BUILD deleted file mode 100644 index 0e5c8aea75b01..0000000000000 --- a/generated_api_shadow/bazel/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") - -licenses(["notice"]) # Apache 2 - -exports_files([ - "repository_locations.bzl", - "repository_locations_utils.bzl", -]) - -go_proto_compiler( - name = "pgv_plugin_go", - options = ["lang=go"], - plugin = "@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate", - suffix = ".pb.validate.go", - valid_archive = False, - visibility = ["//visibility:public"], -) diff --git a/generated_api_shadow/bazel/api_build_system.bzl b/generated_api_shadow/bazel/api_build_system.bzl deleted file mode 100644 index 8a0e0bf71021e..0000000000000 --- a/generated_api_shadow/bazel/api_build_system.bzl +++ /dev/null @@ -1,199 +0,0 @@ -load("@rules_cc//cc:defs.bzl", "cc_test") -load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") -load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") 
-load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") -load("@io_bazel_rules_go//go:def.bzl", "go_test") -load("@rules_proto//proto:defs.bzl", "proto_library") -load( - "//bazel:external_proto_deps.bzl", - "EXTERNAL_PROTO_CC_BAZEL_DEP_MAP", - "EXTERNAL_PROTO_GO_BAZEL_DEP_MAP", - "EXTERNAL_PROTO_PY_BAZEL_DEP_MAP", -) - -_PY_PROTO_SUFFIX = "_py_proto" -_CC_PROTO_SUFFIX = "_cc_proto" -_CC_GRPC_SUFFIX = "_cc_grpc" -_GO_PROTO_SUFFIX = "_go_proto" -_GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/go-control-plane/" - -_COMMON_PROTO_DEPS = [ - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:descriptor_proto", - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:empty_proto", - "@com_google_protobuf//:struct_proto", - "@com_google_protobuf//:timestamp_proto", - "@com_google_protobuf//:wrappers_proto", - "@com_google_googleapis//google/api:http_proto", - "@com_google_googleapis//google/api:httpbody_proto", - "@com_google_googleapis//google/api:annotations_proto", - "@com_google_googleapis//google/rpc:status_proto", - "@com_envoyproxy_protoc_gen_validate//validate:validate_proto", -] - -def _proto_mapping(dep, proto_dep_map, proto_suffix): - mapped = proto_dep_map.get(dep) - if mapped == None: - prefix = "@" + Label(dep).workspace_name if not dep.startswith("//") else "" - return prefix + "//" + Label(dep).package + ":" + Label(dep).name + proto_suffix - return mapped - -def _go_proto_mapping(dep): - return _proto_mapping(dep, EXTERNAL_PROTO_GO_BAZEL_DEP_MAP, _GO_PROTO_SUFFIX) - -def _cc_proto_mapping(dep): - return _proto_mapping(dep, EXTERNAL_PROTO_CC_BAZEL_DEP_MAP, _CC_PROTO_SUFFIX) - -def _py_proto_mapping(dep): - return _proto_mapping(dep, EXTERNAL_PROTO_PY_BAZEL_DEP_MAP, _PY_PROTO_SUFFIX) - -# TODO(htuch): Convert this to native py_proto_library once -# https://github.com/bazelbuild/bazel/issues/3935 and/or -# 
https://github.com/bazelbuild/bazel/issues/2626 are resolved. -def _api_py_proto_library(name, srcs = [], deps = []): - _py_proto_library( - name = name + _PY_PROTO_SUFFIX, - srcs = srcs, - default_runtime = "@com_google_protobuf//:protobuf_python", - protoc = "@com_google_protobuf//:protoc", - deps = [_py_proto_mapping(dep) for dep in deps] + [ - "@com_envoyproxy_protoc_gen_validate//validate:validate_py", - "@com_google_googleapis//google/rpc:status_py_proto", - "@com_google_googleapis//google/api:annotations_py_proto", - "@com_google_googleapis//google/api:http_py_proto", - "@com_google_googleapis//google/api:httpbody_py_proto", - ], - visibility = ["//visibility:public"], - ) - -# This defines googleapis py_proto_library. The repository does not provide its definition and requires -# overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details). -def py_proto_library(name, deps = [], plugin = None): - srcs = [dep[:-6] + ".proto" if dep.endswith("_proto") else dep for dep in deps] - proto_deps = [] - - # py_proto_library in googleapis specifies *_proto rules in dependencies. - # By rewriting *_proto to *.proto above, the dependencies in *_proto rules are not preserved. - # As a workaround, manually specify the proto dependencies for the imported python rules. - if name == "annotations_py_proto": - proto_deps = proto_deps + [":http_py_proto"] - - # checked.proto depends on syntax.proto, we have to add this dependency manually as well. - if name == "checked_py_proto": - proto_deps = proto_deps + [":syntax_py_proto"] - - # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: - # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. - # plugin should also be passed in here when gRPC version is greater than v1.25.x. 
- _py_proto_library( - name = name, - srcs = srcs, - default_runtime = "@com_google_protobuf//:protobuf_python", - protoc = "@com_google_protobuf//:protoc", - deps = proto_deps + ["@com_google_protobuf//:protobuf_python"], - visibility = ["//visibility:public"], - ) - -def _api_cc_grpc_library(name, proto, deps = []): - cc_grpc_library( - name = name, - srcs = [proto], - deps = deps, - proto_only = False, - grpc_only = True, - visibility = ["//visibility:public"], - ) - -def api_cc_py_proto_library( - name, - visibility = ["//visibility:private"], - srcs = [], - deps = [], - linkstatic = 0, - has_services = 0): - relative_name = ":" + name - proto_library( - name = name, - srcs = srcs, - deps = deps + _COMMON_PROTO_DEPS, - visibility = visibility, - ) - cc_proto_library_name = name + _CC_PROTO_SUFFIX - pgv_cc_proto_library( - name = cc_proto_library_name, - linkstatic = linkstatic, - cc_deps = [_cc_proto_mapping(dep) for dep in deps] + [ - "@com_google_googleapis//google/api:http_cc_proto", - "@com_google_googleapis//google/api:httpbody_cc_proto", - "@com_google_googleapis//google/api:annotations_cc_proto", - "@com_google_googleapis//google/rpc:status_cc_proto", - ], - deps = [relative_name], - visibility = ["//visibility:public"], - ) - _api_py_proto_library(name, srcs, deps) - - # Optionally define gRPC services - if has_services: - # TODO: when Python services are required, add to the below stub generations. 
- cc_grpc_name = name + _CC_GRPC_SUFFIX - cc_proto_deps = [cc_proto_library_name] + [_cc_proto_mapping(dep) for dep in deps] - _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) - -def api_cc_test(name, **kwargs): - cc_test( - name = name, - **kwargs - ) - -def api_go_test(name, **kwargs): - go_test( - name = name, - **kwargs - ) - -def api_proto_package( - name = "pkg", - srcs = [], - deps = [], - has_services = False, - visibility = ["//visibility:public"]): - if srcs == []: - srcs = native.glob(["*.proto"]) - - name = "pkg" - api_cc_py_proto_library( - name = name, - visibility = visibility, - srcs = srcs, - deps = deps, - has_services = has_services, - ) - - compilers = ["@io_bazel_rules_go//proto:go_proto", "@envoy_api//bazel:pgv_plugin_go"] - if has_services: - compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] - - # Because RBAC proro depends on googleapis syntax.proto and checked.proto, - # which share the same go proto library, it causes duplicative dependencies. - # Thus, we use depset().to_list() to remove duplicated depenencies. 
- go_proto_library( - name = name + _GO_PROTO_SUFFIX, - compilers = compilers, - importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), - proto = name, - visibility = ["//visibility:public"], - deps = depset([_go_proto_mapping(dep) for dep in deps] + [ - "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:annotations_go_proto", - "@go_googleapis//google/rpc:status_go_proto", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - ]).to_list(), - ) diff --git a/generated_api_shadow/bazel/envoy_http_archive.bzl b/generated_api_shadow/bazel/envoy_http_archive.bzl deleted file mode 100644 index 15fd65b2af278..0000000000000 --- a/generated_api_shadow/bazel/envoy_http_archive.bzl +++ /dev/null @@ -1,22 +0,0 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -def envoy_http_archive(name, locations, **kwargs): - # `existing_rule_keys` contains the names of repositories that have already - # been defined in the Bazel workspace. By skipping repos with existing keys, - # users can override dependency versions by using standard Bazel repository - # rules in their WORKSPACE files. - existing_rule_keys = native.existing_rules().keys() - if name in existing_rule_keys: - # This repository has already been defined, probably because the user - # wants to override the version. Do nothing. - return - location = locations[name] - - # HTTP tarball at a given URL. Add a BUILD file if requested. 
- http_archive( - name = name, - urls = location["urls"], - sha256 = location["sha256"], - strip_prefix = location.get("strip_prefix", ""), - **kwargs - ) diff --git a/generated_api_shadow/bazel/external_deps.bzl b/generated_api_shadow/bazel/external_deps.bzl deleted file mode 100644 index e8283e4fee106..0000000000000 --- a/generated_api_shadow/bazel/external_deps.bzl +++ /dev/null @@ -1,143 +0,0 @@ -load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locations_spec") - -# Envoy dependencies may be annotated with the following attributes: -DEPENDENCY_ANNOTATIONS = [ - # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID - # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See - # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements - # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned. - # This attribute is optional for components with use categories listed in the - # USE_CATEGORIES_WITH_CPE_OPTIONAL - "cpe", - - # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'. - "extensions", - - # Additional dependencies loaded transitively via this dependency that are not tracked in - # Envoy (see the external dependency at the given version for information). - "implied_untracked_deps", - - # Project metadata. - "project_desc", - "project_name", - "project_url", - - # Reflects the UTC date (YYYY-MM-DD format) for the dependency release. This - # is when the dependency was updated in its repository. For dependencies - # that have releases, this is the date of the release. For dependencies - # without releases or for scenarios where we temporarily need to use a - # commit, this date should be the date of the commit in UTC. - "release_date", - - # List of the categories describing how the dependency is being used. 
This attribute is used - # for automatic tracking of security posture of Envoy's dependencies. - # Possible values are documented in the USE_CATEGORIES list below. - # This attribute is mandatory for each dependecy. - "use_category", - - # The dependency version. This may be either a tagged release (preferred) - # or git SHA (as an exception when no release tagged version is suitable). - "version", -] - -# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed -# to be declared. -USE_CATEGORIES = [ - # This dependency is used in API protos. - "api", - # This dependency is used in build process. - "build", - # This dependency is used to process xDS requests. - "controlplane", - # This dependency is used in processing downstream or upstream requests (core). - "dataplane_core", - # This dependency is used in processing downstream or upstream requests (extensions). - "dataplane_ext", - # This dependecy is used for logging, metrics or tracing (core). It may process unstrusted input. - "observability_core", - # This dependecy is used for logging, metrics or tracing (extensions). It may process unstrusted input. - "observability_ext", - # This dependency does not handle untrusted data and is used for various utility purposes. - "other", - # This dependency is used only in tests. - "test_only", - # Documentation generation - "docs", - # Developer tools (not used in build or docs) - "devtools", -] - -# Components with these use categories are not required to specify the 'cpe'. -USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] - -def _fail_missing_attribute(attr, key): - fail("The '%s' attribute must be defined for external dependecy " % attr + key) - -# Method for verifying content of the repository location specifications. -# -# We also remove repository metadata attributes so that further consumers, e.g. -# http_archive, are not confused by them. 
-def load_repository_locations(repository_locations_spec): - locations = {} - for key, location in load_repository_locations_spec(repository_locations_spec).items(): - mutable_location = dict(location) - locations[key] = mutable_location - - if "sha256" not in location or len(location["sha256"]) == 0: - _fail_missing_attribute("sha256", key) - - if "project_name" not in location: - _fail_missing_attribute("project_name", key) - - if "project_desc" not in location: - _fail_missing_attribute("project_desc", key) - - if "project_url" not in location: - _fail_missing_attribute("project_url", key) - project_url = location["project_url"] - if not project_url.startswith("https://") and not project_url.startswith("http://"): - fail("project_url must start with https:// or http://: " + project_url) - - if "version" not in location: - _fail_missing_attribute("version", key) - - if "use_category" not in location: - _fail_missing_attribute("use_category", key) - use_category = location["use_category"] - - if "dataplane_ext" in use_category or "observability_ext" in use_category: - if "extensions" not in location: - _fail_missing_attribute("extensions", key) - - if "release_date" not in location: - _fail_missing_attribute("release_date", key) - release_date = location["release_date"] - - # Starlark doesn't have regexes. - if len(release_date) != 10 or release_date[4] != "-" or release_date[7] != "-": - fail("release_date must match YYYY-DD-MM: " + release_date) - - if "cpe" in location: - cpe = location["cpe"] - - # Starlark doesn't have regexes. - cpe_components = len(cpe.split(":")) - - # We allow cpe:2.3:a:foo:*:* and cpe:2.3.:a:foo:bar:* only. 
- cpe_components_valid = (cpe_components == 6) - cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid)) - if not cpe_matches: - fail("CPE must match cpe:2.3:a:::*: " + cpe) - elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: - _fail_missing_attribute("cpe", key) - - for category in location["use_category"]: - if category not in USE_CATEGORIES: - fail("Unknown use_category value '" + category + "' for dependecy " + key) - - # Remove any extra annotations that we add, so that we don't confuse http_archive etc. - for annotation in DEPENDENCY_ANNOTATIONS: - if annotation in mutable_location: - mutable_location.pop(annotation) - - return locations diff --git a/generated_api_shadow/bazel/external_proto_deps.bzl b/generated_api_shadow/bazel/external_proto_deps.bzl deleted file mode 100644 index 6b11495d3c0dc..0000000000000 --- a/generated_api_shadow/bazel/external_proto_deps.bzl +++ /dev/null @@ -1,48 +0,0 @@ -# Any external dependency imported in the api/ .protos requires entries in -# the maps below, to allow the Bazel proto and language specific bindings to be -# inferred from the import directives. -# -# This file needs to be interpreted as both Python 3 and Starlark, so only the -# common subset of Python should be used. - -# This maps from .proto import directive path to the Bazel dependency path for -# external dependencies. Since BUILD files are generated, this is the canonical -# place to define this mapping. 
-EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { - "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", - "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - "io/prometheus/client/metrics.proto": "@prometheus_metrics_model//:client_model", - "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - "opencensus/proto/trace/v1/trace_config.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - "opentelemetry/proto/common/v1/common.proto": "@opentelemetry_proto//:common", -} - -# This maps from the Bazel proto_library target to the Go language binding target for external dependencies. -EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", - "@opentelemetry_proto//:logs": "@opentelemetry_proto//:logs_go_proto", - "@opentelemetry_proto//:common": "@opentelemetry_proto//:common_go_proto", -} - -# This maps from the Bazel proto_library target to the C++ language binding target for external dependencies. 
-EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc", - "@opentelemetry_proto//:logs": "@opentelemetry_proto//:logs_cc_proto", - "@opentelemetry_proto//:common": "@opentelemetry_proto//:common_cc_proto", -} - -# This maps from the Bazel proto_library target to the Python language binding target for external dependencies. -EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", - "@opentelemetry_proto//:logs": "@opentelemetry_proto//:logs_py_proto", - "@opentelemetry_proto//:common": "@opentelemetry_proto//:common_py_proto", -} diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl deleted file mode 100644 index ef92aa45f0064..0000000000000 --- a/generated_api_shadow/bazel/repositories.bzl +++ /dev/null @@ -1,171 +0,0 @@ -load(":envoy_http_archive.bzl", "envoy_http_archive") -load(":external_deps.bzl", "load_repository_locations") -load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") - 
-REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC) - -# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl. -def external_http_archive(name, **kwargs): - envoy_http_archive( - name, - locations = REPOSITORY_LOCATIONS, - **kwargs - ) - -def api_dependencies(): - external_http_archive( - name = "bazel_skylib", - ) - external_http_archive( - name = "com_envoyproxy_protoc_gen_validate", - ) - external_http_archive( - name = "com_google_googleapis", - ) - external_http_archive( - name = "com_github_bazelbuild_buildtools", - ) - external_http_archive( - name = "com_github_cncf_udpa", - ) - - external_http_archive( - name = "prometheus_metrics_model", - build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT, - ) - external_http_archive( - name = "opencensus_proto", - ) - external_http_archive( - name = "rules_proto", - ) - external_http_archive( - name = "com_github_openzipkin_zipkinapi", - build_file_content = ZIPKINAPI_BUILD_CONTENT, - ) - external_http_archive( - name = "opentelemetry_proto", - build_file_content = OPENTELEMETRY_LOGS_BUILD_CONTENT, - ) - external_http_archive( - name = "com_github_bufbuild_buf", - build_file_content = BUF_BUILD_CONTENT, - tags = ["manual"], - ) - -PROMETHEUSMETRICS_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "client_model", - srcs = [ - "io/prometheus/client/metrics.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "client_model_go_proto", - importpath = "github.com/prometheus/client_model/go", - proto = ":client_model", - visibility = ["//visibility:public"], -) -""" - -OPENCENSUSTRACE_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "trace_model", - srcs 
= [ - "trace.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "trace_model_go_proto", - importpath = "trace_model", - proto = ":trace_model", - visibility = ["//visibility:public"], -) -""" - -ZIPKINAPI_BUILD_CONTENT = """ - -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "zipkin", - srcs = [ - "zipkin-jsonv2.proto", - "zipkin.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "zipkin_go_proto", - proto = ":zipkin", - visibility = ["//visibility:public"], -) -""" - -OPENTELEMETRY_LOGS_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "common", - srcs = [ - "opentelemetry/proto/common/v1/common.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "common_go_proto", - importpath = "go.opentelemetry.io/proto/otlp/common/v1", - proto = ":common", - visibility = ["//visibility:public"], -) - -# TODO(snowp): Generating one Go package from all of these protos could cause problems in the future, -# but nothing references symbols from collector or resource so we're fine for now. 
-api_cc_py_proto_library( - name = "logs", - srcs = [ - "opentelemetry/proto/collector/logs/v1/logs_service.proto", - "opentelemetry/proto/logs/v1/logs.proto", - "opentelemetry/proto/resource/v1/resource.proto", - ], - deps = [ - "//:common", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "logs_go_proto", - importpath = "go.opentelemetry.io/proto/otlp/logs/v1", - proto = ":logs", - visibility = ["//visibility:public"], -) -""" - -BUF_BUILD_CONTENT = """ -package( - default_visibility = ["//visibility:public"], -) - -filegroup( - name = "buf", - srcs = [ - "@com_github_bufbuild_buf//:bin/buf", - ], - tags = ["manual"], # buf is downloaded as a linux binary; tagged manual to prevent build for non-linux users -) -""" diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl deleted file mode 100644 index be1e9c9789e4b..0000000000000 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ /dev/null @@ -1,133 +0,0 @@ -# This should match the schema defined in external_deps.bzl. 
-REPOSITORY_LOCATIONS_SPEC = dict( - bazel_skylib = dict( - project_name = "bazel-skylib", - project_desc = "Common useful functions and rules for Bazel", - project_url = "https://github.com/bazelbuild/bazel-skylib", - version = "1.0.3", - sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", - release_date = "2020-08-27", - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], - use_category = ["api"], - ), - com_envoyproxy_protoc_gen_validate = dict( - project_name = "protoc-gen-validate (PGV)", - project_desc = "protoc plugin to generate polyglot message validators", - project_url = "https://github.com/envoyproxy/protoc-gen-validate", - version = "0.6.1", - sha256 = "c695fc5a2e5a1b52904cd8a58ce7a1c3a80f7f50719496fd606e551685c01101", - release_date = "2021-04-26", - strip_prefix = "protoc-gen-validate-{version}", - urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v{version}.tar.gz"], - use_category = ["api"], - implied_untracked_deps = [ - "com_github_iancoleman_strcase", - "com_github_lyft_protoc_gen_star", - "com_github_spf13_afero", - "org_golang_google_genproto", - "org_golang_x_text", - ], - ), - com_github_bazelbuild_buildtools = dict( - project_name = "Bazel build tools", - project_desc = "Developer tools for working with Google's bazel buildtool.", - project_url = "https://github.com/bazelbuild/buildtools", - version = "4.0.1", - sha256 = "c28eef4d30ba1a195c6837acf6c75a4034981f5b4002dda3c5aa6e48ce023cf1", - release_date = "2021-03-01", - strip_prefix = "buildtools-{version}", - urls = ["https://github.com/bazelbuild/buildtools/archive/{version}.tar.gz"], - use_category = ["api"], - ), - com_github_cncf_udpa = dict( - project_name = "xDS API", - project_desc = "xDS API Working Group (xDS-WG)", - project_url = "https://github.com/cncf/xds", - # During the UDPA -> xDS migration, we aren't working with releases. 
- version = "dd25fe81a44506ab21ea666fb70b3b1c4bb183ee", - sha256 = "9184235cd31272679e4c7f9232c341d4ea75351ded74d3fbba28b05c290bfa71", - release_date = "2021-07-22", - strip_prefix = "xds-{version}", - urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"], - use_category = ["api"], - ), - com_github_openzipkin_zipkinapi = dict( - project_name = "Zipkin API", - project_desc = "Zipkin's language independent model and HTTP Api Definitions", - project_url = "https://github.com/openzipkin/zipkin-api", - version = "1.0.0", - sha256 = "6c8ee2014cf0746ba452e5f2c01f038df60e85eb2d910b226f9aa27ddc0e44cf", - release_date = "2020-11-22", - strip_prefix = "zipkin-api-{version}", - urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], - use_category = ["api"], - ), - com_google_googleapis = dict( - # TODO(dio): Consider writing a Starlark macro for importing Google API proto. - project_name = "Google APIs", - project_desc = "Public interface definitions of Google APIs", - project_url = "https://github.com/googleapis/googleapis", - version = "82944da21578a53b74e547774cf62ed31a05b841", - sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", - release_date = "2019-12-02", - strip_prefix = "googleapis-{version}", - urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], - use_category = ["api"], - ), - opencensus_proto = dict( - project_name = "OpenCensus Proto", - project_desc = "Language Independent Interface Types For OpenCensus", - project_url = "https://github.com/census-instrumentation/opencensus-proto", - version = "0.3.0", - sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0", - release_date = "2020-07-21", - strip_prefix = "opencensus-proto-{version}/src", - urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], - use_category = ["api"], - ), - prometheus_metrics_model = dict( - project_name = "Prometheus client model", - 
project_desc = "Data model artifacts for Prometheus", - project_url = "https://github.com/prometheus/client_model", - version = "147c58e9608a4f9628b53b6cc863325ca746f63a", - sha256 = "f7da30879dcdfae367fa65af1969945c3148cfbfc462b30b7d36f17134675047", - release_date = "2021-06-07", - strip_prefix = "client_model-{version}", - urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], - use_category = ["api"], - ), - rules_proto = dict( - project_name = "Protobuf Rules for Bazel", - project_desc = "Protocol buffer rules for Bazel", - project_url = "https://github.com/bazelbuild/rules_proto", - version = "f7a30f6f80006b591fa7c437fe5a951eb10bcbcf", - sha256 = "9fc210a34f0f9e7cc31598d109b5d069ef44911a82f507d5a88716db171615a8", - release_date = "2021-02-09", - strip_prefix = "rules_proto-{version}", - urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], - use_category = ["api"], - ), - opentelemetry_proto = dict( - project_name = "OpenTelemetry Proto", - project_desc = "Language Independent Interface Types For OpenTelemetry", - project_url = "https://github.com/open-telemetry/opentelemetry-proto", - version = "0.9.0", - sha256 = "9ec38ab51eedbd7601979b0eda962cf37bc8a4dc35fcef604801e463f01dcc00", - release_date = "2021-05-12", - strip_prefix = "opentelemetry-proto-{version}", - urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"], - use_category = ["api"], - ), - com_github_bufbuild_buf = dict( - project_name = "buf", - project_desc = "A new way of working with Protocol Buffers.", # Used for breaking change detection in API protobufs - project_url = "https://buf.build", - version = "0.53.0", - sha256 = "888bb52d358e34a8d6a57ecff426bed896bdf478ad13c78a70a9e1a9a2d75715", - strip_prefix = "buf", - urls = ["https://github.com/bufbuild/buf/releases/download/v{version}/buf-Linux-x86_64.tar.gz"], - release_date = "2021-08-25", - use_category = ["api"], - tags = ["manual"], - ), -) diff --git 
a/generated_api_shadow/bazel/repository_locations_utils.bzl b/generated_api_shadow/bazel/repository_locations_utils.bzl deleted file mode 100644 index 3b984e1bc580a..0000000000000 --- a/generated_api_shadow/bazel/repository_locations_utils.bzl +++ /dev/null @@ -1,20 +0,0 @@ -def _format_version(s, version): - return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) - -# Generate a "repository location specification" from raw repository -# specification. The information should match the format required by -# external_deps.bzl. This function mostly does interpolation of {version} in -# the repository info fields. This code should be capable of running in both -# Python and Starlark. -def load_repository_locations_spec(repository_locations_spec): - locations = {} - for key, location in repository_locations_spec.items(): - mutable_location = dict(location) - locations[key] = mutable_location - - # Fixup with version information. 
- if "version" in location: - if "strip_prefix" in location: - mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"]) - mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]] - return locations diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto b/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto deleted file mode 100644 index f9bc9cceceb99..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.squash.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.squash.v3"; -option java_outer_classname = "SquashProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Squash] -// Squash :ref:`configuration overview `. -// [#extension: envoy.filters.http.squash] - -// [#next-free-field: 6] -message Squash { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.squash.v2.Squash"; - - // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // When the filter requests the Squash server to create a DebugAttachment, it will use this - // structure as template for the body of the request. It can contain reference to environment - // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server - // with more information to find the process to attach the debugger to. 
For example, in a - // Istio/k8s environment, this will contain information on the pod: - // - // .. code-block:: json - // - // { - // "spec": { - // "attachment": { - // "pod": "{{ POD_NAME }}", - // "namespace": "{{ POD_NAMESPACE }}" - // }, - // "match_request": true - // } - // } - // - // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) - google.protobuf.Struct attachment_template = 2; - - // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. - google.protobuf.Duration request_timeout = 3; - - // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 - // seconds. - google.protobuf.Duration attachment_timeout = 4; - - // Amount of time to poll for the status of the attachment object in the Squash server - // (to check if has been attached). Defaults to 1 second. - google.protobuf.Duration attachment_poll_period = 5; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD deleted file mode 100644 index 3ca8242f77801..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto deleted file mode 100644 index b9efc278e6de8..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto +++ /dev/null @@ -1,67 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.sxg.v3alpha; - -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.sxg.v3alpha"; -option java_outer_classname = "SxgProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Signed HTTP Exchange Filter] -// SXG :ref:`configuration overview `. -// [#extension: envoy.filters.http.sxg] - -// [#next-free-field: 10] -message SXG { - // The SDS configuration for the public key data for the SSL certificate that will be used to sign the - // SXG response. - transport_sockets.tls.v3.SdsSecretConfig certificate = 1; - - // The SDS configuration for the private key data for the SSL certificate that will be used to sign the - // SXG response. - transport_sockets.tls.v3.SdsSecretConfig private_key = 2; - - // The duration for which the generated SXG package will be valid. Default is 604800s (7 days in seconds). - // Note that in order to account for clock skew, the timestamp will be backdated by a day. 
So, if duration - // is set to 7 days, that will be 7 days from 24 hours ago (6 days from now). Also note that while 6/7 days - // is appropriate for most content, if the downstream service is serving Javascript, or HTML with inline - // Javascript, 1 day (so, with backdated expiry, 2 days, or 172800 seconds) is more appropriate. - google.protobuf.Duration duration = 3; - - // The SXG response payload is Merkle Integrity Content Encoding (MICE) encoded (specification is [here](https://datatracker.ietf.org/doc/html/draft-thomson-http-mice-03)) - // This value indicates the record size in the encoded payload. The default value is 4096. - uint64 mi_record_size = 4; - - // The URI of certificate CBOR file published. Since it is required that the certificate CBOR file - // be served from the same domain as the SXG document, this should be a relative URI. - string cbor_url = 5 [(validate.rules).string = {min_len: 1 prefix: "/"}]; - - // URL to retrieve validity data for signature, a CBOR map. See specification [here](https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.6) - string validity_url = 6 [(validate.rules).string = {min_len: 1 prefix: "/"}]; - - // Header that will be set if it is determined that the client can accept SXG (typically `accept: application/signed-exchange;v=b3) - // If not set, filter will default to: `x-client-can-accept-sxg` - string client_can_accept_sxg_header = 7 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} - ]; - - // Header set by downstream service to signal that the response should be transformed to SXG If not set, - // filter will default to: `x-should-encode-sxg` - string should_encode_sxg_header = 8 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} - ]; - - // Headers that will be stripped from the SXG document, by listing a prefix (i.e. 
`x-custom-` will cause - // all headers prefixed by `x-custom-` to be omitted from the SXG document) - repeated string header_prefix_filters = 9 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto deleted file mode 100644 index 0fac07427d0c0..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.kafka_broker.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3"; -option java_outer_classname = "KafkaBrokerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Kafka Broker] -// Kafka Broker :ref:`configuration overview `. -// [#extension: envoy.filters.network.kafka_broker] - -message KafkaBroker { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker"; - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto deleted file mode 100644 index 03a6522852ab5..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.kafka_mesh.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_mesh.v3alpha"; -option java_outer_classname = "KafkaMeshProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Kafka Mesh] -// Kafka Mesh :ref:`configuration overview `. -// [#extension: envoy.filters.network.kafka_mesh] - -message KafkaMesh { - // Envoy's host that's advertised to clients. - // Has the same meaning as corresponding Kafka broker properties. - // Usually equal to filter chain's listener config, but needs to be reachable by clients - // (so 0.0.0.0 will not work). - string advertised_host = 1 [(validate.rules).string = {min_len: 1}]; - - // Envoy's port that's advertised to clients. - int32 advertised_port = 2 [(validate.rules).int32 = {gt: 0}]; - - // Upstream clusters this filter will connect to. - repeated KafkaClusterDefinition upstream_clusters = 3; - - // Rules that will decide which cluster gets which request. - repeated ForwardingRule forwarding_rules = 4; -} - -message KafkaClusterDefinition { - // Cluster name. 
- string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // Kafka cluster address. - string bootstrap_servers = 2 [(validate.rules).string = {min_len: 1}]; - - // Default number of partitions present in this cluster. - // This is especially important for clients that do not specify partition in their payloads and depend on this value for hashing. - int32 partition_count = 3 [(validate.rules).int32 = {gt: 0}]; - - // Custom configuration passed to Kafka producer. - map producer_config = 4; -} - -message ForwardingRule { - // Cluster name. - string target_cluster = 1; - - oneof trigger { - // Intended place for future types of forwarding rules. - string topic_prefix = 2; - } -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto deleted file mode 100644 index 9dfdb14d3f11a..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.mysql_proxy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3"; -option java_outer_classname = "MysqlProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: MySQL proxy] -// MySQL Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.mysql_proxy] - -message MySQLProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy"; - - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. - // If the access log field is empty, access logs will not be written. 
- string access_log = 2; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto deleted file mode 100644 index 8fe98f269626d..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.postgres_proxy.v3alpha; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.postgres_proxy.v3alpha"; -option java_outer_classname = "PostgresProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Postgres proxy] -// Postgres Proxy :ref:`configuration overview -// `. -// [#extension: envoy.filters.network.postgres_proxy] - -message PostgresProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. 
- string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Controls whether SQL statements received in Frontend Query messages - // are parsed. Parsing is required to produce Postgres proxy filter - // metadata. Defaults to true. - google.protobuf.BoolValue enable_sql_parsing = 2; - - // Controls whether to terminate SSL session initiated by a client. - // If the value is false, the Postgres proxy filter will not try to - // terminate SSL session, but will pass all the packets to the upstream server. - // If the value is true, the Postgres proxy filter will try to terminate SSL - // session. In order to do that, the filter chain must use :ref:`starttls transport socket - // `. - // If the filter does not manage to terminate the SSL session, it will close the connection from the client. - // Refer to official documentation for details - // `SSL Session Encryption Message Flow `_. - bool terminate_ssl = 3; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD deleted file mode 100644 index 2f90ace882d93..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto deleted file mode 100644 index 12438751fada6..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v3; - -import "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; -option java_outer_classname = "RocketmqProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RocketMQ Proxy] -// RocketMQ Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.rocketmq_proxy] - -message RocketmqProxy { - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is specified in this property. - RouteConfiguration route_config = 2; - - // The largest duration transient object expected to live, more than 10s is recommended. 
- google.protobuf.Duration transient_object_life_span = 3; - - // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting - // facility without considering backward compatibility of exiting RocketMQ client SDK. - bool develop_mode = 4; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto deleted file mode 100644 index 6ec6c71c5627d..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rocketmq Proxy Route Configuration] -// Rocketmq Proxy :ref:`configuration overview `. - -message RouteConfiguration { - // The name of the route configuration. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 2; -} - -message Route { - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - // The name of the topic. 
- type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v3.HeaderMatcher headers = 2; -} - -message RouteAction { - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. - config.core.v3.Metadata metadata_match = 2; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/BUILD b/generated_api_shadow/envoy/admin/v2alpha/BUILD deleted file mode 100644 index 6fe8cb995d343..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/service/tap/v2alpha:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/admin/v2alpha/certs.proto b/generated_api_shadow/envoy/admin/v2alpha/certs.proto deleted file mode 100644 index c7b568ca1e58a..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/certs.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "CertsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Certificates] - -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to -// display certificate information. See :ref:`/certs ` for more -// information. -message Certificates { - // List of certificates known to an Envoy. - repeated Certificate certificates = 1; -} - -message Certificate { - // Details of CA certificate. - repeated CertificateDetails ca_cert = 1; - - // Details of Certificate Chain - repeated CertificateDetails cert_chain = 2; -} - -// [#next-free-field: 7] -message CertificateDetails { - // Path of the certificate. - string path = 1; - - // Certificate Serial Number. - string serial_number = 2; - - // List of Subject Alternate names. - repeated SubjectAlternateName subject_alt_names = 3; - - // Minimum of days until expiration of certificate and it's chain. - uint64 days_until_expiration = 4; - - // Indicates the time from which the certificate is valid. 
- google.protobuf.Timestamp valid_from = 5; - - // Indicates the time at which the certificate expires. - google.protobuf.Timestamp expiration_time = 6; -} - -message SubjectAlternateName { - // Subject Alternate Name. - oneof name { - string dns = 1; - - string uri = 2; - - string ip_address = 3; - } -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/clusters.proto b/generated_api_shadow/envoy/admin/v2alpha/clusters.proto deleted file mode 100644 index 3b7ec029aa630..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/clusters.proto +++ /dev/null @@ -1,153 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/admin/v2alpha/metrics.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/type/percent.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ClustersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Clusters] - -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. -// See :ref:`/clusters ` for more information. -message Clusters { - // Mapping from cluster name to each cluster's status. - repeated ClusterStatus cluster_statuses = 1; -} - -// Details an individual cluster's current status. -// [#next-free-field: 6] -message ClusterStatus { - // Name of the cluster. - string name = 1; - - // Denotes whether this cluster was added via API or configured statically. - bool added_via_api = 2; - - // The success rate threshold used in the last interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. - // The threshold is used to eject hosts based on their success rate. See - // :ref:`Cluster outlier detection ` documentation for details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.Percent success_rate_ejection_threshold = 3; - - // Mapping from host address to the host's current status. - repeated HostStatus host_statuses = 4; - - // The success rate threshold used in the last interval when only locally originated failures were - // taken into account and externally originated errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.Percent local_origin_success_rate_ejection_threshold = 5; -} - -// Current state of a particular host. -// [#next-free-field: 10] -message HostStatus { - // Address of this host. - api.v2.core.Address address = 1; - - // List of stats specific to this host. 
- repeated SimpleMetric stats = 2; - - // The host's current health status. - HostHealthStatus health_status = 3; - - // Request success rate for this host over the last calculated interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate - // calculation. If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.Percent success_rate = 4; - - // The host's weight. If not configured, the value defaults to 1. - uint32 weight = 5; - - // The hostname of the host, if applicable. - string hostname = 6; - - // The host's priority. If not configured, the value defaults to 0 (highest priority). - uint32 priority = 7; - - // Request success rate for this host over the last calculated - // interval when only locally originated errors are taken into account and externally originated - // errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.Percent local_origin_success_rate = 8; - - // locality of the host. - api.v2.core.Locality locality = 9; -} - -// Health status for a host. 
-// [#next-free-field: 7] -message HostHealthStatus { - // The host is currently failing active health checks. - bool failed_active_health_check = 1; - - // The host is currently considered an outlier and has been ejected. - bool failed_outlier_check = 2; - - // The host is currently being marked as degraded through active health checking. - bool failed_active_degraded_check = 4; - - // The host has been removed from service discovery, but is being stabilized due to active - // health checking. - bool pending_dynamic_removal = 5; - - // The host has not yet been health checked. - bool pending_active_hc = 6; - - // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported - // here. - // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] - api.v2.core.HealthStatus eds_health_status = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto deleted file mode 100644 index 833c015fb4749..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto +++ /dev/null @@ -1,291 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/config/bootstrap/v2/bootstrap.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ConfigDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: ConfigDump] - -// The :ref:`/config_dump ` admin endpoint uses this wrapper -// message to maintain and serve arbitrary configuration information from any component in Envoy. -message ConfigDump { - // This list is serialized and dumped in its entirety at the - // :ref:`/config_dump ` endpoint. 
- // - // The following configurations are currently supported and will be dumped in the order given - // below: - // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // - // You can filter output with the resource and mask query parameters. - // See :ref:`/config_dump?resource={} `, - // :ref:`/config_dump?mask={} `, - // or :ref:`/config_dump?resource={},mask={} - // ` for more information. - repeated google.protobuf.Any configs = 1; -} - -message UpdateFailureState { - // What the component configuration would have been if the update had succeeded. - google.protobuf.Any failed_configuration = 1; - - // Time of the latest failed update attempt. - google.protobuf.Timestamp last_update_attempt = 2; - - // Details about the last failed update attempt. - string details = 3; -} - -// This message describes the bootstrap configuration that Envoy was started with. This includes -// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate -// the static portions of an Envoy configuration by reusing the output as the bootstrap -// configuration for another Envoy. -message BootstrapConfigDump { - config.bootstrap.v2.Bootstrap bootstrap = 1; - - // The timestamp when the BootstrapConfig was last updated. - google.protobuf.Timestamp last_updated = 2; -} - -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -message ListenersConfigDump { - // Describes a statically loaded listener. - message StaticListener { - // The listener config. - google.protobuf.Any listener = 1; - - // The timestamp when the Listener was last successfully updated. 
- google.protobuf.Timestamp last_updated = 2; - } - - message DynamicListenerState { - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - string version_info = 1; - - // The listener config. - google.protobuf.Any listener = 2; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 3; - } - - // Describes a dynamically loaded listener via the LDS API. - // [#next-free-field: 6] - message DynamicListener { - // The name or unique id of this listener, pulled from the DynamicListenerState config. - string name = 1; - - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - DynamicListenerState active_state = 2; - - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - DynamicListenerState warming_state = 3; - - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DynamicListenerState draining_state = 4; - - // Set if the last update failed, cleared after the next successful update. - UpdateFailureState error_state = 5; - } - - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". 
- string version_info = 1; - - // The statically loaded listener configs. - repeated StaticListener static_listeners = 2; - - // State for any warming, active, or draining listeners. - repeated DynamicListener dynamic_listeners = 3; -} - -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -message ClustersConfigDump { - // Describes a statically loaded cluster. - message StaticCluster { - // The cluster config. - google.protobuf.Any cluster = 1; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // Describes a dynamically loaded cluster via the CDS API. - message DynamicCluster { - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. - string version_info = 1; - - // The cluster config. - google.protobuf.Any cluster = 2; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - - // The statically loaded cluster configs. - repeated StaticCluster static_clusters = 2; - - // The dynamically loaded active clusters. These are clusters that are available to service - // data plane traffic. - repeated DynamicCluster dynamic_active_clusters = 3; - - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. 
Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - repeated DynamicCluster dynamic_warming_clusters = 4; -} - -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -message RoutesConfigDump { - message StaticRouteConfig { - // The route config. - google.protobuf.Any route_config = 1; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - message DynamicRouteConfig { - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - string version_info = 1; - - // The route config. - google.protobuf.Any route_config = 2; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // The statically loaded route configs. - repeated StaticRouteConfig static_route_configs = 2; - - // The dynamically loaded route configs. - repeated DynamicRouteConfig dynamic_route_configs = 3; -} - -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. 
-message ScopedRoutesConfigDump { - message InlineScopedRouteConfigs { - // The name assigned to the scoped route configurations. - string name = 1; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 2; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - message DynamicScopedRouteConfigs { - // The name assigned to the scoped route configurations. - string name = 1; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - string version_info = 2; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 3; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 4; - } - - // The statically loaded scoped route configs. - repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; - - // The dynamically loaded scoped route configs. - repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -message SecretsConfigDump { - // DynamicSecret contains secret information fetched via SDS. - message DynamicSecret { - // The name assigned to the secret. - string name = 1; - - // This is the per-resource version information. - string version_info = 2; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 3; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 4; - } - - // StaticSecret specifies statically loaded secret in bootstrap. - message StaticSecret { - // The name assigned to the secret. 
- string name = 1; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 2; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 3; - } - - // The statically loaded secrets. - repeated StaticSecret static_secrets = 1; - - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - repeated DynamicSecret dynamic_active_secrets = 2; - - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. - repeated DynamicSecret dynamic_warming_secrets = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/listeners.proto b/generated_api_shadow/envoy/admin/v2alpha/listeners.proto deleted file mode 100644 index ca7b736521d0d..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/listeners.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/api/v2/core/address.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ListenersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listeners] - -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. -// See :ref:`/listeners ` for more information. -message Listeners { - // List of listener statuses. - repeated ListenerStatus listener_statuses = 1; -} - -// Details an individual listener's current status. -message ListenerStatus { - // Name of the listener - string name = 1; - - // The actual local address that the listener is listening on. 
If a listener was configured - // to listen on port 0, then this address has the port that was allocated by the OS. - api.v2.core.Address local_address = 2; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/memory.proto b/generated_api_shadow/envoy/admin/v2alpha/memory.proto deleted file mode 100644 index 85fd2169d6d70..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/memory.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "MemoryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Memory] - -// Proto representation of the internal memory consumption of an Envoy instance. These represent -// values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). -// [#next-free-field: 7] -message Memory { - // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. - uint64 allocated = 1; - - // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. - uint64 heap_size = 2; - - // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards - // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. - uint64 pageheap_unmapped = 3; - - // The number of bytes in free, mapped pages in the page heap. These bytes always count towards - // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. 
This is an alias for `tcmalloc.pageheap_free_bytes`. - uint64 pageheap_free = 4; - - // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. - uint64 total_thread_cache = 5; - - // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. - uint64 total_physical_bytes = 6; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/metrics.proto b/generated_api_shadow/envoy/admin/v2alpha/metrics.proto deleted file mode 100644 index 15ad219c13e58..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/metrics.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "MetricsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metrics] - -// Proto representation of an Envoy Counter or Gauge value. -message SimpleMetric { - enum Type { - COUNTER = 0; - GAUGE = 1; - } - - // Type of the metric represented. - Type type = 1; - - // Current metric value. - uint64 value = 2; - - // Name of the metric. 
- string name = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto b/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto deleted file mode 100644 index 22c65f3de5a64..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "MutexStatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: MutexStats] - -// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` -// [docs](https://abseil.io/about/design/mutex#extra-features). -// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` -// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). -message MutexStats { - // The number of individual mutex contentions which have occurred since startup. - uint64 num_contentions = 1; - - // The length of the current contention wait cycle. - uint64 current_wait_cycles = 2; - - // The lifetime total of all contention wait cycles. 
- uint64 lifetime_wait_cycles = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/server_info.proto b/generated_api_shadow/envoy/admin/v2alpha/server_info.proto deleted file mode 100644 index b9db6bbc1e1fb..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/server_info.proto +++ /dev/null @@ -1,154 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ServerInfoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Server State] - -// Proto representation of the value returned by /server_info, containing -// server version/server status information. -// [#next-free-field: 7] -message ServerInfo { - enum State { - // Server is live and serving traffic. - LIVE = 0; - - // Server is draining listeners in response to external health checks failing. - DRAINING = 1; - - // Server has not yet completed cluster manager initialization. - PRE_INITIALIZING = 2; - - // Server is running the cluster manager initialization callbacks (e.g., RDS). - INITIALIZING = 3; - } - - // Server version. - string version = 1; - - // State of the server. - State state = 2; - - // Uptime since current epoch was started. - google.protobuf.Duration uptime_current_epoch = 3; - - // Uptime since the start of the first epoch. - google.protobuf.Duration uptime_all_epochs = 4; - - // Hot restart version. - string hot_restart_version = 5; - - // Command line options the server is currently running with. - CommandLineOptions command_line_options = 6; -} - -// [#next-free-field: 29] -message CommandLineOptions { - enum IpVersion { - v4 = 0; - v6 = 1; - } - - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. 
- Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - - reserved 12; - - // See :option:`--base-id` for details. - uint64 base_id = 1; - - // See :option:`--concurrency` for details. - uint32 concurrency = 2; - - // See :option:`--config-path` for details. - string config_path = 3; - - // See :option:`--config-yaml` for details. - string config_yaml = 4; - - // See :option:`--allow-unknown-static-fields` for details. - bool allow_unknown_static_fields = 5; - - // See :option:`--reject-unknown-dynamic-fields` for details. - bool reject_unknown_dynamic_fields = 26; - - // See :option:`--admin-address-path` for details. - string admin_address_path = 6; - - // See :option:`--local-address-ip-version` for details. - IpVersion local_address_ip_version = 7; - - // See :option:`--log-level` for details. - string log_level = 8; - - // See :option:`--component-log-level` for details. - string component_log_level = 9; - - // See :option:`--log-format` for details. - string log_format = 10; - - // See :option:`--log-format-escaped` for details. - bool log_format_escaped = 27; - - // See :option:`--log-path` for details. - string log_path = 11; - - // See :option:`--service-cluster` for details. - string service_cluster = 13; - - // See :option:`--service-node` for details. - string service_node = 14; - - // See :option:`--service-zone` for details. - string service_zone = 15; - - // See :option:`--file-flush-interval-msec` for details. - google.protobuf.Duration file_flush_interval = 16; - - // See :option:`--drain-time-s` for details. - google.protobuf.Duration drain_time = 17; - - // See :option:`--parent-shutdown-time-s` for details. - google.protobuf.Duration parent_shutdown_time = 18; - - // See :option:`--mode` for details. - Mode mode = 19; - - // max_stats and max_obj_name_len are now unused and have no effect. 
- uint64 max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - uint64 max_obj_name_len = 21 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // See :option:`--disable-hot-restart` for details. - bool disable_hot_restart = 22; - - // See :option:`--enable-mutex-tracing` for details. - bool enable_mutex_tracing = 23; - - // See :option:`--restart-epoch` for details. - uint32 restart_epoch = 24; - - // See :option:`--cpuset-threads` for details. - bool cpuset_threads = 25; - - // See :option:`--disable-extensions` for details. - repeated string disabled_extensions = 28; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/tap.proto b/generated_api_shadow/envoy/admin/v2alpha/tap.proto deleted file mode 100644 index 6335b4db62841..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/tap.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/service/tap/v2alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap] - -// The /tap admin request body that is used to configure an active tap session. -message TapRequest { - // The opaque configuration ID used to match the configuration to a loaded extension. - // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The tap configuration to load. 
- service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/admin/v3/BUILD b/generated_api_shadow/envoy/admin/v3/BUILD deleted file mode 100644 index 38eadcb09feaa..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/annotations:pkg", - "//envoy/config/bootstrap/v3:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/tap/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/admin/v3/certs.proto b/generated_api_shadow/envoy/admin/v3/certs.proto deleted file mode 100644 index 5580bb5ef17d1..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/certs.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "CertsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Certificates] - -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to -// display certificate information. See :ref:`/certs ` for more -// information. -message Certificates { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificates"; - - // List of certificates known to an Envoy. 
- repeated Certificate certificates = 1; -} - -message Certificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificate"; - - // Details of CA certificate. - repeated CertificateDetails ca_cert = 1; - - // Details of Certificate Chain - repeated CertificateDetails cert_chain = 2; -} - -// [#next-free-field: 8] -message CertificateDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.CertificateDetails"; - - message OcspDetails { - // Indicates the time from which the OCSP response is valid. - google.protobuf.Timestamp valid_from = 1; - - // Indicates the time at which the OCSP response expires. - google.protobuf.Timestamp expiration = 2; - } - - // Path of the certificate. - string path = 1; - - // Certificate Serial Number. - string serial_number = 2; - - // List of Subject Alternate names. - repeated SubjectAlternateName subject_alt_names = 3; - - // Minimum of days until expiration of certificate and it's chain. - uint64 days_until_expiration = 4; - - // Indicates the time from which the certificate is valid. - google.protobuf.Timestamp valid_from = 5; - - // Indicates the time at which the certificate expires. - google.protobuf.Timestamp expiration_time = 6; - - // Details related to the OCSP response associated with this certificate, if any. - OcspDetails ocsp_details = 7; -} - -message SubjectAlternateName { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SubjectAlternateName"; - - // Subject Alternate Name. 
- oneof name { - string dns = 1; - - string uri = 2; - - string ip_address = 3; - } -} diff --git a/generated_api_shadow/envoy/admin/v3/clusters.proto b/generated_api_shadow/envoy/admin/v3/clusters.proto deleted file mode 100644 index 509280f466243..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/clusters.proto +++ /dev/null @@ -1,177 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/admin/v3/metrics.proto"; -import "envoy/config/cluster/v3/circuit_breaker.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/health_check.proto"; -import "envoy/type/v3/percent.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ClustersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Clusters] - -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. -// See :ref:`/clusters ` for more information. -message Clusters { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Clusters"; - - // Mapping from cluster name to each cluster's status. - repeated ClusterStatus cluster_statuses = 1; -} - -// Details an individual cluster's current status. -// [#next-free-field: 8] -message ClusterStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus"; - - // Name of the cluster. - string name = 1; - - // Denotes whether this cluster was added via API or configured statically. - bool added_via_api = 2; - - // The success rate threshold used in the last interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. - // The threshold is used to eject hosts based on their success rate. See - // :ref:`Cluster outlier detection ` documentation for details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent success_rate_ejection_threshold = 3; - - // Mapping from host address to the host's current status. - repeated HostStatus host_statuses = 4; - - // The success rate threshold used in the last interval when only locally originated failures were - // taken into account and externally originated errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent local_origin_success_rate_ejection_threshold = 5; - - // :ref:`Circuit breaking ` settings of the cluster. - config.cluster.v3.CircuitBreakers circuit_breakers = 6; - - // Observability name of the cluster. 
- string observability_name = 7; -} - -// Current state of a particular host. -// [#next-free-field: 10] -message HostStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.HostStatus"; - - // Address of this host. - config.core.v3.Address address = 1; - - // List of stats specific to this host. - repeated SimpleMetric stats = 2; - - // The host's current health status. - HostHealthStatus health_status = 3; - - // Request success rate for this host over the last calculated interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate - // calculation. If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent success_rate = 4; - - // The host's weight. If not configured, the value defaults to 1. - uint32 weight = 5; - - // The hostname of the host, if applicable. - string hostname = 6; - - // The host's priority. If not configured, the value defaults to 0 (highest priority). - uint32 priority = 7; - - // Request success rate for this host over the last calculated - // interval when only locally originated errors are taken into account and externally originated - // errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. - // See :ref:`Cluster outlier detection ` documentation for - // details. 
- // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent local_origin_success_rate = 8; - - // locality of the host. - config.core.v3.Locality locality = 9; -} - -// Health status for a host. -// [#next-free-field: 9] -message HostHealthStatus { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.HostHealthStatus"; - - // The host is currently failing active health checks. - bool failed_active_health_check = 1; - - // The host is currently considered an outlier and has been ejected. - bool failed_outlier_check = 2; - - // The host is currently being marked as degraded through active health checking. - bool failed_active_degraded_check = 4; - - // The host has been removed from service discovery, but is being stabilized due to active - // health checking. - bool pending_dynamic_removal = 5; - - // The host has not yet been health checked. - bool pending_active_hc = 6; - - // The host should be excluded from panic, spillover, etc. calculations because it was explicitly - // taken out of rotation via protocol signal and is not meant to be routed to. - bool excluded_via_immediate_hc_fail = 7; - - // The host failed active HC due to timeout. - bool active_hc_timeout = 8; - - // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported - // here. - // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
- config.core.v3.HealthStatus eds_health_status = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/config_dump.proto b/generated_api_shadow/envoy/admin/v3/config_dump.proto deleted file mode 100644 index ddafb56b39362..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/config_dump.proto +++ /dev/null @@ -1,482 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/bootstrap/v3/bootstrap.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ConfigDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: ConfigDump] - -// Resource status from the view of a xDS client, which tells the synchronization -// status between the xDS client and the xDS server. -enum ClientResourceStatus { - // Resource status is not available/unknown. - UNKNOWN = 0; - - // Client requested this resource but hasn't received any update from management - // server. The client will not fail requests, but will queue them until update - // arrives or the client times out waiting for the resource. - REQUESTED = 1; - - // This resource has been requested by the client but has either not been - // delivered by the server or was previously delivered by the server and then - // subsequently removed from resources provided by the server. For more - // information, please refer to the :ref:`"Knowing When a Requested Resource - // Does Not Exist" ` section. - DOES_NOT_EXIST = 2; - - // Client received this resource and replied with ACK. - ACKED = 3; - - // Client received this resource and replied with NACK. 
- NACKED = 4; -} - -// The :ref:`/config_dump ` admin endpoint uses this wrapper -// message to maintain and serve arbitrary configuration information from any component in Envoy. -message ConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ConfigDump"; - - // This list is serialized and dumped in its entirety at the - // :ref:`/config_dump ` endpoint. - // - // The following configurations are currently supported and will be dumped in the order given - // below: - // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *endpoints*: :ref:`EndpointsConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *scoped_routes*: :ref:`ScopedRoutesConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // * *secrets*: :ref:`SecretsConfigDump ` - // - // EDS Configuration will only be dumped by using parameter `?include_eds` - // - // You can filter output with the resource and mask query parameters. - // See :ref:`/config_dump?resource={} `, - // :ref:`/config_dump?mask={} `, - // or :ref:`/config_dump?resource={},mask={} - // ` for more information. - repeated google.protobuf.Any configs = 1; -} - -message UpdateFailureState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.UpdateFailureState"; - - // What the component configuration would have been if the update had succeeded. - // This field may not be populated by xDS clients due to storage overhead. - google.protobuf.Any failed_configuration = 1; - - // Time of the latest failed update attempt. - google.protobuf.Timestamp last_update_attempt = 2; - - // Details about the last failed update attempt. - string details = 3; - - // This is the version of the rejected resource. - // [#not-implemented-hide:] - string version_info = 4; -} - -// This message describes the bootstrap configuration that Envoy was started with. This includes -// any CLI overrides that were merged. 
Bootstrap configuration information can be used to recreate -// the static portions of an Envoy configuration by reusing the output as the bootstrap -// configuration for another Envoy. -message BootstrapConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.BootstrapConfigDump"; - - config.bootstrap.v3.Bootstrap bootstrap = 1; - - // The timestamp when the BootstrapConfig was last updated. - google.protobuf.Timestamp last_updated = 2; -} - -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -message ListenersConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump"; - - // Describes a statically loaded listener. - message StaticListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump.StaticListener"; - - // The listener config. - google.protobuf.Any listener = 1; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 2; - } - - message DynamicListenerState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump.DynamicListenerState"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - string version_info = 1; - - // The listener config. - google.protobuf.Any listener = 2; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 3; - } - - // Describes a dynamically loaded listener via the LDS API. 
- // [#next-free-field: 7] - message DynamicListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump.DynamicListener"; - - // The name or unique id of this listener, pulled from the DynamicListenerState config. - string name = 1; - - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - DynamicListenerState active_state = 2; - - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - DynamicListenerState warming_state = 3; - - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DynamicListenerState draining_state = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - - // The statically loaded listener configs. 
- repeated StaticListener static_listeners = 2; - - // State for any warming, active, or draining listeners. - repeated DynamicListener dynamic_listeners = 3; -} - -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -message ClustersConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ClustersConfigDump"; - - // Describes a statically loaded cluster. - message StaticCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ClustersConfigDump.StaticCluster"; - - // The cluster config. - google.protobuf.Any cluster = 1; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // Describes a dynamically loaded cluster via the CDS API. - // [#next-free-field: 6] - message DynamicCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ClustersConfigDump.DynamicCluster"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. - string version_info = 1; - - // The cluster config. - google.protobuf.Any cluster = 2; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. 
- // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - - // The statically loaded cluster configs. - repeated StaticCluster static_clusters = 2; - - // The dynamically loaded active clusters. These are clusters that are available to service - // data plane traffic. - repeated DynamicCluster dynamic_active_clusters = 3; - - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - repeated DynamicCluster dynamic_warming_clusters = 4; -} - -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -message RoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.RoutesConfigDump"; - - message StaticRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.RoutesConfigDump.StaticRouteConfig"; - - // The route config. - google.protobuf.Any route_config = 1; - - // The timestamp when the Route was last updated. 
- google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.RoutesConfigDump.DynamicRouteConfig"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - string version_info = 1; - - // The route config. - google.protobuf.Any route_config = 2; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded route configs. - repeated StaticRouteConfig static_route_configs = 2; - - // The dynamically loaded route configs. - repeated DynamicRouteConfig dynamic_route_configs = 3; -} - -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. 
-message ScopedRoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ScopedRoutesConfigDump"; - - message InlineScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 2; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // [#next-free-field: 7] - message DynamicScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - string version_info = 2; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 3; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // The statically loaded scoped route configs. - repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; - - // The dynamically loaded scoped route configs. 
- repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -message SecretsConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SecretsConfigDump"; - - // DynamicSecret contains secret information fetched via SDS. - // [#next-free-field: 7] - message DynamicSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SecretsConfigDump.DynamicSecret"; - - // The name assigned to the secret. - string name = 1; - - // This is the per-resource version information. - string version_info = 2; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 3; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // StaticSecret specifies statically loaded secret in bootstrap. - message StaticSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SecretsConfigDump.StaticSecret"; - - // The name assigned to the secret. - string name = 1; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 2; - - // The actual secret information. 
- // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 3; - } - - // The statically loaded secrets. - repeated StaticSecret static_secrets = 1; - - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - repeated DynamicSecret dynamic_active_secrets = 2; - - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. - repeated DynamicSecret dynamic_warming_secrets = 3; -} - -// Envoy's admin fill this message with all currently known endpoints. Endpoint -// configuration information can be used to recreate an Envoy configuration by populating all -// endpoints as static endpoints or by returning them in an EDS response. -message EndpointsConfigDump { - message StaticEndpointConfig { - // The endpoint config. - google.protobuf.Any endpoint_config = 1; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicEndpointConfig { - // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the endpoint configuration was loaded. - string version_info = 1; - - // The endpoint config. - google.protobuf.Any endpoint_config = 2; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. 
- // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded endpoint configs. - repeated StaticEndpointConfig static_endpoint_configs = 2; - - // The dynamically loaded endpoint configs. - repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/init_dump.proto b/generated_api_shadow/envoy/admin/v3/init_dump.proto deleted file mode 100644 index 0c2eb738c4310..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/init_dump.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "InitDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: InitDump] - -// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, -// which provides the information of their unready targets. -// The :ref:`/init_dump ` will dump all unready targets information. -message UnreadyTargetsDumps { - // Message of unready targets information of an init manager. - message UnreadyTargetsDump { - // Name of the init manager. Example: "init_manager_xxx". - string name = 1; - - // Names of unready targets of the init manager. Example: "target_xxx". - repeated string target_names = 2; - } - - // You can choose specific component to dump unready targets with mask query parameter. - // See :ref:`/init_dump?mask={} ` for more information. - // The dumps of unready targets of all init managers. 
- repeated UnreadyTargetsDump unready_targets_dumps = 1; -} diff --git a/generated_api_shadow/envoy/admin/v3/listeners.proto b/generated_api_shadow/envoy/admin/v3/listeners.proto deleted file mode 100644 index 6197a44e4243f..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/listeners.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ListenersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listeners] - -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. -// See :ref:`/listeners ` for more information. -message Listeners { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Listeners"; - - // List of listener statuses. - repeated ListenerStatus listener_statuses = 1; -} - -// Details an individual listener's current status. -message ListenerStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenerStatus"; - - // Name of the listener - string name = 1; - - // The actual local address that the listener is listening on. If a listener was configured - // to listen on port 0, then this address has the port that was allocated by the OS. 
- config.core.v3.Address local_address = 2; -} diff --git a/generated_api_shadow/envoy/admin/v3/memory.proto b/generated_api_shadow/envoy/admin/v3/memory.proto deleted file mode 100644 index bcf9f271748d8..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/memory.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "MemoryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Memory] - -// Proto representation of the internal memory consumption of an Envoy instance. These represent -// values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). -// [#next-free-field: 7] -message Memory { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Memory"; - - // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. - uint64 allocated = 1; - - // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. - uint64 heap_size = 2; - - // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards - // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. - uint64 pageheap_unmapped = 3; - - // The number of bytes in free, mapped pages in the page heap. These bytes always count towards - // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. 
This is an alias for `tcmalloc.pageheap_free_bytes`. - uint64 pageheap_free = 4; - - // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. - uint64 total_thread_cache = 5; - - // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. - uint64 total_physical_bytes = 6; -} diff --git a/generated_api_shadow/envoy/admin/v3/metrics.proto b/generated_api_shadow/envoy/admin/v3/metrics.proto deleted file mode 100644 index 71592ac1e9ecf..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/metrics.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "MetricsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metrics] - -// Proto representation of an Envoy Counter or Gauge value. -message SimpleMetric { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SimpleMetric"; - - enum Type { - COUNTER = 0; - GAUGE = 1; - } - - // Type of the metric represented. - Type type = 1; - - // Current metric value. - uint64 value = 2; - - // Name of the metric. 
- string name = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/mutex_stats.proto b/generated_api_shadow/envoy/admin/v3/mutex_stats.proto deleted file mode 100644 index 49965d87ae805..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/mutex_stats.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "MutexStatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: MutexStats] - -// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` -// [docs](https://abseil.io/about/design/mutex#extra-features). -// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` -// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). -message MutexStats { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.MutexStats"; - - // The number of individual mutex contentions which have occurred since startup. - uint64 num_contentions = 1; - - // The length of the current contention wait cycle. - uint64 current_wait_cycles = 2; - - // The lifetime total of all contention wait cycles. 
- uint64 lifetime_wait_cycles = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto deleted file mode 100644 index 7593ade49a62e..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ /dev/null @@ -1,205 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ServerInfoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Server State] - -// Proto representation of the value returned by /server_info, containing -// server version/server status information. -// [#next-free-field: 8] -message ServerInfo { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo"; - - enum State { - // Server is live and serving traffic. - LIVE = 0; - - // Server is draining listeners in response to external health checks failing. - DRAINING = 1; - - // Server has not yet completed cluster manager initialization. - PRE_INITIALIZING = 2; - - // Server is running the cluster manager initialization callbacks (e.g., RDS). - INITIALIZING = 3; - } - - // Server version. - string version = 1; - - // State of the server. - State state = 2; - - // Uptime since current epoch was started. - google.protobuf.Duration uptime_current_epoch = 3; - - // Uptime since the start of the first epoch. - google.protobuf.Duration uptime_all_epochs = 4; - - // Hot restart version. - string hot_restart_version = 5; - - // Command line options the server is currently running with. - CommandLineOptions command_line_options = 6; - - // Populated node identity of this server. 
- config.core.v3.Node node = 7; -} - -// [#next-free-field: 38] -message CommandLineOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.CommandLineOptions"; - - enum IpVersion { - v4 = 0; - v6 = 1; - } - - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - - enum DrainStrategy { - // Gradually discourage connections over the course of the drain period. - Gradual = 0; - - // Discourage all connections for the duration of the drain sequence. - Immediate = 1; - } - - reserved 12, 29; - - reserved "bootstrap_version"; - - // See :option:`--base-id` for details. - uint64 base_id = 1; - - // See :option:`--use-dynamic-base-id` for details. - bool use_dynamic_base_id = 31; - - // See :option:`--base-id-path` for details. - string base_id_path = 32; - - // See :option:`--concurrency` for details. - uint32 concurrency = 2; - - // See :option:`--config-path` for details. - string config_path = 3; - - // See :option:`--config-yaml` for details. - string config_yaml = 4; - - // See :option:`--allow-unknown-static-fields` for details. - bool allow_unknown_static_fields = 5; - - // See :option:`--reject-unknown-dynamic-fields` for details. - bool reject_unknown_dynamic_fields = 26; - - // See :option:`--ignore-unknown-dynamic-fields` for details. - bool ignore_unknown_dynamic_fields = 30; - - // See :option:`--admin-address-path` for details. - string admin_address_path = 6; - - // See :option:`--local-address-ip-version` for details. - IpVersion local_address_ip_version = 7; - - // See :option:`--log-level` for details. - string log_level = 8; - - // See :option:`--component-log-level` for details. - string component_log_level = 9; - - // See :option:`--log-format` for details. 
- string log_format = 10; - - // See :option:`--log-format-escaped` for details. - bool log_format_escaped = 27; - - // See :option:`--log-path` for details. - string log_path = 11; - - // See :option:`--service-cluster` for details. - string service_cluster = 13; - - // See :option:`--service-node` for details. - string service_node = 14; - - // See :option:`--service-zone` for details. - string service_zone = 15; - - // See :option:`--file-flush-interval-msec` for details. - google.protobuf.Duration file_flush_interval = 16; - - // See :option:`--drain-time-s` for details. - google.protobuf.Duration drain_time = 17; - - // See :option:`--drain-strategy` for details. - DrainStrategy drain_strategy = 33; - - // See :option:`--parent-shutdown-time-s` for details. - google.protobuf.Duration parent_shutdown_time = 18; - - // See :option:`--mode` for details. - Mode mode = 19; - - // See :option:`--disable-hot-restart` for details. - bool disable_hot_restart = 22; - - // See :option:`--enable-mutex-tracing` for details. - bool enable_mutex_tracing = 23; - - // See :option:`--restart-epoch` for details. - uint32 restart_epoch = 24; - - // See :option:`--cpuset-threads` for details. - bool cpuset_threads = 25; - - // See :option:`--disable-extensions` for details. - repeated string disabled_extensions = 28; - - // See :option:`--enable-fine-grain-logging` for details. - bool enable_fine_grain_logging = 34; - - // See :option:`--socket-path` for details. - string socket_path = 35; - - // See :option:`--socket-mode` for details. - uint32 socket_mode = 36; - - // See :option:`--enable-core-dump` for details. 
- bool enable_core_dump = 37; - - uint64 hidden_envoy_deprecated_max_stats = 20 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - uint64 hidden_envoy_deprecated_max_obj_name_len = 21 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} diff --git a/generated_api_shadow/envoy/admin/v3/tap.proto b/generated_api_shadow/envoy/admin/v3/tap.proto deleted file mode 100644 index 934170b2deeab..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap] - -// The /tap admin request body that is used to configure an active tap session. -message TapRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.TapRequest"; - - // The opaque configuration ID used to match the configuration to a loaded extension. - // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The tap configuration to load. 
- config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/annotations/BUILD b/generated_api_shadow/envoy/annotations/BUILD deleted file mode 100644 index 5c06e2deae7d8..0000000000000 --- a/generated_api_shadow/envoy/annotations/BUILD +++ /dev/null @@ -1,5 +0,0 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package() diff --git a/generated_api_shadow/envoy/annotations/deprecation.proto b/generated_api_shadow/envoy/annotations/deprecation.proto deleted file mode 100644 index ce02ab98a8dcd..0000000000000 --- a/generated_api_shadow/envoy/annotations/deprecation.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.annotations; - -import "google/protobuf/descriptor.proto"; - -// [#protodoc-title: Deprecation] -// Adds annotations for deprecated fields and enums to allow tagging proto -// fields as fatal by default and the minor version on which the field was -// deprecated. One Envoy release after deprecation, deprecated fields will be -// disallowed by default, a state which is reversible with -// :ref:`runtime overrides `. - -// Magic number in this file derived from top 28bit of SHA256 digest of -// "envoy.annotation.disallowed_by_default" and "envoy.annotation.deprecated_at_minor_version" -extend google.protobuf.FieldOptions { - bool disallowed_by_default = 189503207; - - // The API major and minor version on which the field was deprecated - // (e.g., "3.5" for major version 3 and minor version 5). 
- string deprecated_at_minor_version = 157299826; -} - -// Magic number in this file derived from top 28bit of SHA256 digest of -// "envoy.annotation.disallowed_by_default_enum" and -// "envoy.annotation.deprecated_at_minor_version_eum" -extend google.protobuf.EnumValueOptions { - bool disallowed_by_default_enum = 70100853; - - // The API major and minor version on which the enum value was deprecated - // (e.g., "3.5" for major version 3 and minor version 5). - string deprecated_at_minor_version_enum = 181198657; -} diff --git a/generated_api_shadow/envoy/annotations/resource.proto b/generated_api_shadow/envoy/annotations/resource.proto deleted file mode 100644 index b9dcf658e5226..0000000000000 --- a/generated_api_shadow/envoy/annotations/resource.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.annotations; - -import "google/protobuf/descriptor.proto"; - -// Magic number in this file derived from top 28bit of SHA256 digest of "envoy.annotation.resource". -extend google.protobuf.ServiceOptions { - ResourceAnnotation resource = 265073217; -} - -message ResourceAnnotation { - // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource - // type. - string type = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/BUILD b/generated_api_shadow/envoy/api/v2/BUILD deleted file mode 100644 index 0aded6e51b71a..0000000000000 --- a/generated_api_shadow/envoy/api/v2/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/listener/v2:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/README.md b/generated_api_shadow/envoy/api/v2/README.md deleted file mode 100644 index 984be690a103b..0000000000000 --- a/generated_api_shadow/envoy/api/v2/README.md +++ /dev/null @@ -1,9 +0,0 @@ -Protocol buffer definitions for xDS and top-level resource API messages. - -Package group `//envoy/api/v2:friends` enumerates all consumers of the shared -API messages. That includes package envoy.api.v2 itself, which contains several -xDS definitions. Default visibility for all shared definitions should be set to -`//envoy/api/v2:friends`. - -Additionally, packages envoy.api.v2.core and envoy.api.v2.auth are also -consumed throughout the subpackages of `//envoy/api/v2`. diff --git a/generated_api_shadow/envoy/api/v2/auth/BUILD b/generated_api_shadow/envoy/api/v2/auth/BUILD deleted file mode 100644 index aaab1df155473..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/auth/cert.proto b/generated_api_shadow/envoy/api/v2/auth/cert.proto deleted file mode 100644 index 6a9cbddd25084..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/cert.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "udpa/annotations/migrate.proto"; - -import public "envoy/api/v2/auth/common.proto"; -import public "envoy/api/v2/auth/secret.proto"; -import public "envoy/api/v2/auth/tls.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "CertProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; diff --git a/generated_api_shadow/envoy/api/v2/auth/common.proto b/generated_api_shadow/envoy/api/v2/auth/common.proto deleted file mode 100644 index c8122f4010297..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/common.proto +++ /dev/null @@ -1,327 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status 
= FROZEN; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for - // servers. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. 
If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - // The TLS certificate chain. - core.DataSource certificate_chain = 1; - - // The TLS private key. - core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. 
- core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - core.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated core.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - // Peer certificate verification mode. 
- enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - core.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative Names. 
If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated string verify_subject_alt_name = 4 [deprecated = true]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - core.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. 
- TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/auth/secret.proto b/generated_api_shadow/envoy/api/v2/auth/secret.proto deleted file mode 100644 index 3a6d8cf7dcb67..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/secret.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "envoy/api/v2/auth/common.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "SecretProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Secrets configuration] - -message GenericSecret { - // Secret of generic type and is available to filters. - core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - core.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/api/v2/auth/tls.proto b/generated_api_shadow/envoy/api/v2/auth/tls.proto deleted file mode 100644 index 201973a2b9de8..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/tls.proto +++ /dev/null @@ -1,152 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "envoy/api/v2/auth/common.proto"; -import "envoy/api/v2/auth/secret.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "TlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: TLS transport socket] -// [#extension: envoy.transport_sockets.tls] -// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. - -message UpstreamTlsContext { - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. 
- bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. 
- bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. 
When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
- repeated string alpn_protocols = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/cds.proto b/generated_api_shadow/envoy/api/v2/cds.proto deleted file mode 100644 index 0b657a0fa452b..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cds.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/cluster.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "CdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: CDS] - -// Return list of all clusters this proxy will load balance to. -service ClusterDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.Cluster"; - - rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:clusters"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message CdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto deleted file mode 100644 index fab95f71b7630..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ /dev/null @@ -1,867 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/auth/tls.proto"; -import "envoy/api/v2/cluster/circuit_breaker.proto"; -import "envoy/api/v2/cluster/filter.proto"; -import "envoy/api/v2/cluster/outlier_detection.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/endpoint.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Cluster configuration] - -// Configuration for a single upstream cluster. -// [#next-free-field: 48] -message Cluster { - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. 
- LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. - enum LbPolicy { - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`original destination load balancing - // policy` - // for an explanation. - // - // .. attention:: - // - // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. - // - ORIGINAL_DST_LB = 4 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. - CLUSTER_PROVIDED = 6; - - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. - // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. 
If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. - enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. - // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - // against the values specified in this field. - google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - core.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. 
- google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - // Configuration for the source of EDS updates for this Cluster. - core.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - // [#next-free-field: 8] - message LbSubsetConfig { - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - - // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - // keys reduced to - // :ref:`fallback_keys_subset`. 
- // It allows for a fallback to a different, less specific selector if some of the keys of - // the selector are considered optional. - KEYS_SUBSET = 4; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Subset of - // :ref:`keys` used by - // :ref:`KEYS_SUBSET` - // fallback policy. - // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - // For any other fallback policy the parameter is not used and should not be set. - // Only values also present in - // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. - repeated string fallback_keys_subset = 3; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. 
code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. - repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionately affected by the - // subset predicate. - bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. 
- bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. - message LeastRequestLbConfig { - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - // The hash function used to hash hosts onto the ketama ring. - enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. 
- google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - // [#next-free-field: 8] - message CommonLbConfig { - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. 
- bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - } - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - message ConsistentHashingLbConfig { - // If set to `true`, the cluster will use hostname instead of the resolved - // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - bool use_hostname_for_hashing = 1; - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. - // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. See - // https://github.com/envoyproxy/envoy/pull/3941. 
- google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will not consider new hosts when computing load balancing weights until - // they have been health checked for the first time. This will have no effect unless - // active health checking is also configured. - // - // Ignoring a host means that for any load balancing calculations that adjust weights based - // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and - // panic mode) Envoy will exclude these hosts in the denominator. - // - // For example, with hosts in two priorities P0 and P1, where P0 looks like - // {healthy, unhealthy (new), unhealthy (new)} - // and where P1 looks like - // {healthy, healthy} - // all traffic will still hit P0, as 1 / (3 - 2) = 1. - // - // Enabling this will allow scaling up the number of hosts for a given cluster without entering - // panic mode or triggering priority spillover, assuming the hosts pass the first health check. - // - // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not - // contribute to the calculation when deciding whether panic mode is enabled or not. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - } - - message RefreshRate { - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. 
This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - reserved 12, 15; - - // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket_match* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: envoy.transport_sockets.raw_buffer - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". 
While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. - // - // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An optional alternative to the cluster name to be used while emitting stats. - // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - // confused with :ref:`Router Filter Header - // `. - string alt_stat_name = 28; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). 
- google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. - // - repeated core.Address hosts = 7 [deprecated = true]; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // - ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; - - // Optional :ref:`circuit breaking ` for the cluster. - cluster.CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. - // - // .. attention:: - // - // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - // set, `transport_socket` takes priority. 
- auth.UpstreamTlsContext tls_context = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // HTTP protocol options that are applied only to upstream HTTP connections. - // These options apply to all HTTP versions. - core.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; - - // Additional options when handling HTTP requests upstream. These options will be applicable to - // both HTTP1 and HTTP2 requests. - core.HttpProtocolOptions common_http_protocol_options = 29; - - // Additional options when handling HTTP1 requests. - core.Http1ProtocolOptions http_protocol_options = 13; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - core.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map extension_protocol_options = 35 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. 
- map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. The value configured must be at least 1ms. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. 
- // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - repeated core.Address dns_resolvers = 18; - - // [#next-major-version: Reconcile DNS options in a single message.] - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 45; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - cluster.OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. 
- core.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.Metadata metadata = 25; - - // Determines how Envoy selects the protocol used to speak to upstream hosts. - ClusterProtocolSelection protocol_selection = 26; - - // Optional options for upstream connections. 
- UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. - bool close_connections_on_host_health_failure = 31; - - // If set to true, Envoy will ignore the health value of a host when processing its removal - // from service discovery. This means that if active health checking is used, Envoy will *not* - // wait for the endpoint to go unhealthy before removing it. - bool drain_connections_on_host_removal = 32 - [(udpa.annotations.field_migrate).rename = "ignore_health_on_host_removal"]; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated cluster.Filter filters = 40; - - // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). 
- // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.ConfigSource lrs_server = 42; - - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; -} - -// [#not-implemented-hide:] Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. -// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. 
-// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. -// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. -message LoadBalancingPolicy { - message Policy { - // Required. The name of the LB policy. - string name = 1; - - // Optional config for the LB policy. - // No more than one of these two fields may be populated. - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - // The address Envoy should bind to when establishing upstream connections. - core.Address source_address = 1; -} - -message UpstreamConnectionOptions { - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.TcpKeepalive tcp_keepalive = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/cluster/BUILD b/generated_api_shadow/envoy/api/v2/cluster/BUILD deleted file mode 100644 index 2ffbc958786b3..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto b/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto deleted file mode 100644 index 510619b264296..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.cluster; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option java_outer_classname = "CircuitBreakerProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Circuit breakers] - -// :ref:`Circuit breaking` settings can be -// specified individually for each defined priority. -message CircuitBreakers { - // A Thresholds defines CircuitBreaker settings for a - // :ref:`RoutingPriority`. - // [#next-free-field: 9] - message Thresholds { - message RetryBudget { - // Specifies the limit on concurrent retries as a percentage of the sum of active requests and - // active pending requests. For example, if there are 100 active requests and the - // budget_percent is set to 25, there may be 25 active retries. - // - // This parameter is optional. Defaults to 20%. - type.Percent budget_percent = 1; - - // Specifies the minimum retry concurrency allowed for the retry budget. 
The limit on the - // number of active retries may never go below this number. - // - // This parameter is optional. Defaults to 3. - google.protobuf.UInt32Value min_retry_concurrency = 2; - } - - // The :ref:`RoutingPriority` - // the specified CircuitBreaker settings apply to. - core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; - - // The maximum number of connections that Envoy will make to the upstream - // cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_connections = 2; - - // The maximum number of pending requests that Envoy will allow to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 3; - - // The maximum number of parallel requests that Envoy will make to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_requests = 4; - - // The maximum number of parallel retries that Envoy will allow to the - // upstream cluster. If not specified, the default is 3. - google.protobuf.UInt32Value max_retries = 5; - - // Specifies a limit on concurrent retries in relation to the number of active requests. This - // parameter is optional. - // - // .. note:: - // - // If this field is set, the retry budget will override any configured retry circuit - // breaker. - RetryBudget retry_budget = 8; - - // If track_remaining is true, then stats will be published that expose - // the number of resources remaining until the circuit breakers open. If - // not specified, the default is false. - // - // .. note:: - // - // If a retry budget is used in lieu of the max_retries circuit breaker, - // the remaining retry resources remaining will not be tracked. - bool track_remaining = 6; - - // The maximum number of connection pools per cluster that Envoy will concurrently support at - // once. If not specified, the default is unlimited. 
Set this for clusters which create a - // large number of connection pools. See - // :ref:`Circuit Breaking ` for - // more details. - google.protobuf.UInt32Value max_connection_pools = 7; - } - - // If multiple :ref:`Thresholds` - // are defined with the same :ref:`RoutingPriority`, - // the first one in the list is used. If no Thresholds is defined for a given - // :ref:`RoutingPriority`, the default values - // are used. - repeated Thresholds thresholds = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/cluster/filter.proto b/generated_api_shadow/envoy/api/v2/cluster/filter.proto deleted file mode 100644 index b87ad79d8f352..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/filter.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.cluster; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option java_outer_classname = "FilterProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Upstream filters] -// Upstream filters apply to the connections to the upstream cluster hosts. - -message Filter { - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any typed_config = 2; -} diff --git a/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto b/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto deleted file mode 100644 index 6cf35e41ff153..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto +++ /dev/null @@ -1,151 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.cluster; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Outlier detection] - -// See the :ref:`architecture overview ` for -// more information on outlier detection. -// [#next-free-field: 21] -message OutlierDetection { - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_5xx = 1; - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as hosts being returned to service. Defaults - // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; - - // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected. - // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive 5xx. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; - - // The number of hosts in a cluster that must have enough request volume to - // detect success rate outliers. If the number of hosts is less than this - // setting, outlier detection via success rate statistics is not performed - // for any host in the cluster. Defaults to 5. - google.protobuf.UInt32Value success_rate_minimum_hosts = 7; - - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this host - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that host. Defaults to 100. - google.protobuf.UInt32Value success_rate_request_volume = 8; - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. 
The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - google.protobuf.UInt32Value success_rate_stdev_factor = 9; - - // The number of consecutive gateway failures (502, 503, 504 status codes) - // before a consecutive gateway failure ejection occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_gateway_failure = 10; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive gateway failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32 = {lte: 100}]; - - // Determines whether to distinguish local origin failures from external errors. If set to true - // the following configuration parameters are taken into account: - // :ref:`consecutive_local_origin_failure`, - // :ref:`enforcing_consecutive_local_origin_failure` - // and - // :ref:`enforcing_local_origin_success_rate`. - // Defaults to false. - bool split_external_local_origin_errors = 12; - - // The number of consecutive locally originated failures before ejection - // occurs. Defaults to 5. Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value consecutive_local_origin_failure = 13; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive locally originated failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 100. 
- // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics for locally originated errors. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32 = {lte: 100}]; - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given host is greater than or equal to this value, it will be - // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - // - // [#next-major-version: setting this without setting failure_percentage_threshold should be - // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // local-origin failure percentage statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32 = {lte: 100}]; - - // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
- // If the total number of hosts in the cluster is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; - - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this host. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - google.protobuf.UInt32Value failure_percentage_request_volume = 20; -} diff --git a/generated_api_shadow/envoy/api/v2/core/BUILD b/generated_api_shadow/envoy/api/v2/core/BUILD deleted file mode 100644 index 8475a4ba83760..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/core/address.proto b/generated_api_shadow/envoy/api/v2/core/address.proto deleted file mode 100644 index fdcb4e7d94f9a..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/address.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/socket_option.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Network addresses] - -message Pipe { - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. - // Paths starting with '@' will result in an error in environments other than - // Linux. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The mode for the Pipe. Not applicable for abstract sockets. - uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; -} - -// [#next-free-field: 7] -message SocketAddress { - enum Protocol { - TCP = 0; - UDP = 1; - } - - Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; - - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_bytes: 1}]; - - oneof port_specifier { - option (validate.required) = true; - - uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - string named_port = 4; - } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. 
If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. - bool ipv4_compat = 6; -} - -message TcpKeepalive { - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - google.protobuf.UInt32Value keepalive_probes = 1; - - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - google.protobuf.UInt32Value keepalive_time = 2; - - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) - google.protobuf.UInt32Value keepalive_interval = 3; -} - -message BindConfig { - // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; - - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. 
- google.protobuf.BoolValue freebind = 2; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated SocketOption socket_options = 3; -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. -message Address { - oneof address { - option (validate.required) = true; - - SocketAddress socket_address = 1; - - Pipe pipe = 2; - } -} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -message CidrRange { - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. - google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/backoff.proto b/generated_api_shadow/envoy/api/v2/core/backoff.proto deleted file mode 100644 index e45c71e39be8f..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/backoff.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "BackoffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Backoff Strategy] - -// Configuration defining a jittered exponential back off strategy. -message BackoffStrategy { - // The base interval to be used for the next back off computation. 
It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/base.proto b/generated_api_shadow/envoy/api/v2/core/base.proto deleted file mode 100644 index 32cd90b4ee1b4..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/base.proto +++ /dev/null @@ -1,381 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/backoff.proto"; -import "envoy/api/v2/core/http_uri.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/semantic_version.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -import public "envoy/api/v2/core/socket_option.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common types] - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. 
This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. -enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; -} - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -message Locality { - // Region this :ref:`zone ` belongs to. - string region = 1; - - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. - string zone = 2; - - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - string sub_zone = 3; -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -message BuildVersion { - // SemVer version of extension. - type.SemanticVersion version = 1; - - // Free-form build information. - // Envoy defines several well known keys in the source/common/version/version.h file - google.protobuf.Struct metadata = 2; -} - -// Version and identification for an Envoy extension. 
-// [#next-free-field: 6] -message Extension { - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - string name = 1; - - // Category of the extension. - // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. - // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - string category = 2; - - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - string type_descriptor = 3; - - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - BuildVersion version = 4; - - // Indicates that the extension is present but was disabled via dynamic configuration. - bool disabled = 5; -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 12] -message Node { - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. - string id = 1; - - // Defines the local service cluster name where Envoy is running. 
Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - string cluster = 2; - - // Opaque metadata extending the node identifier. Envoy will pass this - // directly to the management server. - google.protobuf.Struct metadata = 3; - - // Locality specifying where the Envoy instance is running. - Locality locality = 4; - - // This is motivated by informing a management server during canary which - // version of Envoy is being tested in a heterogeneous fleet. This will be set - // by Envoy in management server RPCs. - // This field is deprecated in favor of the user_agent_name and user_agent_version values. - string build_version = 5 [deprecated = true]; - - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - string user_agent_name = 6; - - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; - - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } - - // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; - - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. - // See :ref:`the list of features ` that xDS client may - // support. 
- repeated string client_features = 10; - - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - repeated Address listening_addresses = 11; -} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. -// [#next-major-version: move to type/metadata/v2] -message Metadata { - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - map filter_metadata = 1; -} - -// Runtime derived uint32 with a default when not specified. -message RuntimeUInt32 { - // Default value if runtime value is not available. - uint32 default_value = 2; - - // Runtime key to get value for comparison. 
This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; -} - -// Runtime derived double with a default when not specified. -message RuntimeDouble { - // Default value if runtime value is not available. - double default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; -} - -// Runtime derived bool with a default when not specified. -message RuntimeFeatureFlag { - // Default value if runtime value is not available. - google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key to get value for comparison. This value is used if defined. The boolean value must - // be represented via its - // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; -} - -// Header name/value pair. -message HeaderValue { - // Header name. - string key = 1 - [(validate.rules).string = - {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [ - (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; -} - -// Header name/value pair plus option to control append behavior. -message HeaderValueOption { - // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message = {required: true}]; - - // Should the value be appended? If true (default), the value is appended to - // existing values. - google.protobuf.BoolValue append = 2; -} - -// Wrapper for a set of headers. -message HeaderMap { - repeated HeaderValue headers = 1; -} - -// Data source consisting of either a file or an inline value. 
-message DataSource { - oneof specifier { - option (validate.required) = true; - - // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; - - // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; - } -} - -// The message specifies the retry policy of remote data source when fetching fails. -message RetryPolicy { - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - BackoffStrategy retry_back_off = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; -} - -// The message specifies how to fetch data from remote and how to verify it. -message RemoteDataSource { - // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; - - // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Retry policy for fetching remote data. - RetryPolicy retry_policy = 3; -} - -// Async data source which support async data fetch. -message AsyncDataSource { - oneof specifier { - option (validate.required) = true; - - // Local async data source. - DataSource local = 1; - - // Remote async data source. - RemoteDataSource remote = 2; - } -} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -message TransportSocket { - // The name of the transport socket to instantiate. 
The name must match a supported transport - // socket implementation. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. -message RuntimeFractionalPercent { - // Default value if the runtime value's for the numerator/denominator keys are not available. - type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key for a YAML representation of a FractionalPercent. - string runtime_key = 2; -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. -message ControlPlane { - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. 
- string identifier = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/core/config_source.proto b/generated_api_shadow/envoy/api/v2/core/config_source.proto deleted file mode 100644 index 6cf44dbe9bbd2..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/config_source.proto +++ /dev/null @@ -1,185 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Configuration sources] - -// xDS API version. This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -enum ApiVersion { - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - AUTO = 0 [deprecated = true]; - - // Use xDS v2 API. - V2 = 1 [deprecated = true]; - - // Use xDS v3 API. - V3 = 2; -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. -// [#next-free-field: 9] -message ApiConfigSource { - // APIs may be fetched via either REST or gRPC. - enum ApiType { - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - UNSUPPORTED_REST_LEGACY = 0 - [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // REST-JSON v2 API. 
The `canonical JSON encoding - // `_ for - // the v2 protos is used. - REST = 1; - - // gRPC v2 API. - GRPC = 2; - - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - DELTA_GRPC = 3; - } - - // API type (gRPC, REST, delta gRPC) - ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; - - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. - repeated string cluster_names = 2; - - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - repeated GrpcService grpc_services = 4; - - // For REST APIs, the delay between successive polls. - google.protobuf.Duration refresh_delay = 3; - - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; - - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. - RateLimitSettings rate_limit_settings = 6; - - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - bool set_node_on_first_message_only = 7; -} - -// Aggregated Discovery Service (ADS) options. 
This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. -message AggregatedConfigSource { -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -message SelfConfigSource { - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. -message RateLimitSettings { - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - google.protobuf.UInt32Value max_tokens = 1; - - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. -// [#next-free-field: 7] -message ConfigSource { - oneof config_source_specifier { - option (validate.required) = true; - - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. 
The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; - - // API configuration source. - ApiConfigSource api_config_source = 2; - - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; - - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) - // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] - SelfConfigSource self = 5; - } - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API version for xDS resources. 
This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/event_service_config.proto b/generated_api_shadow/envoy/api/v2/core/event_service_config.proto deleted file mode 100644 index f822f8c6b630d..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/event_service_config.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "EventServiceConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. -message EventServiceConfig { - oneof config_source_specifier { - option (validate.required) = true; - - // Specifies the gRPC service that hosts the event reporting service. 
- GrpcService grpc_service = 1; - } -} diff --git a/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto b/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto deleted file mode 100644 index 3d646484b359d..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "GrpcMethodListProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC method list] - -// A list of gRPC methods which can be used as an allowlist, for example. -message GrpcMethodList { - message Service { - // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The names of the gRPC methods in this service. 
- repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; - } - - repeated Service services = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/core/grpc_service.proto b/generated_api_shadow/envoy/api/v2/core/grpc_service.proto deleted file mode 100644 index dd789644e1d71..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/grpc_service.proto +++ /dev/null @@ -1,227 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC services] - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -message GrpcService { - message EnvoyGrpc { - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // [#next-free-field: 7] - message GoogleGrpc { - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - message SslCredentials { - // PEM encoded server root certificates. - DataSource root_certs = 1; - - // PEM encoded client private key. - DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // PEM encoded client certificate chain. 
- DataSource cert_chain = 3; - } - - // Local channel credentials. Only UDS is supported for now. - // See https://github.com/grpc/grpc/pull/15909. - message GoogleLocalCredentials { - } - - // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call - // credential types. - message ChannelCredentials { - oneof credential_specifier { - option (validate.required) = true; - - SslCredentials ssl_credentials = 1; - - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_default = 2; - - GoogleLocalCredentials local_credentials = 3; - } - } - - // [#next-free-field: 8] - message CallCredentials { - message ServiceAccountJWTAccessCredentials { - string json_key = 1; - - uint64 token_lifetime_seconds = 2; - } - - message GoogleIAMCredentials { - string authorization_token = 1; - - string authority_selector = 2; - } - - message MetadataCredentialsFromPlugin { - string name = 1; - - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - // Security token service configuration that allows Google gRPC to - // fetch security token from an OAuth 2.0 authorization server. - // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - // https://github.com/grpc/grpc/pull/19587. - // [#next-free-field: 10] - message StsService { - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - string token_exchange_service_uri = 1; - - // Location of the target service or resource where the client - // intends to use the requested security token. - string resource = 2; - - // Logical name of the target service where the client intends to - // use the requested security token. 
- string audience = 3; - - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - string scope = 4; - - // Type of the requested security token. - string requested_token_type = 5; - - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; - - // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; - - // The path of actor token, a security token that represents the identity - // of the acting party. The acting party is authorized to use the - // requested security token and act on behalf of the subject. - string actor_token_path = 8; - - // Type of the actor token. - string actor_token_type = 9; - } - - oneof credential_specifier { - option (validate.required) = true; - - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - string access_token = 1; - - // Google Compute Engine credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_compute_engine = 2; - - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - string google_refresh_token = 3; - - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; - - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIAMCredentials google_iam = 5; - - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. 
- // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - MetadataCredentialsFromPlugin from_plugin = 6; - - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. - StsService sts_service = 7; - } - } - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; - - ChannelCredentials channel_credentials = 2; - - // A set of call credentials that can be composed with `channel credentials - // `_. - repeated CallCredentials call_credentials = 3; - - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; - - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. - string credentials_factory_name = 5; - - // Additional configuration for site-specific customizations of the Google - // gRPC library. - google.protobuf.Struct config = 6; - } - - reserved 4; - - oneof target_specifier { - option (validate.required) = true; - - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc envoy_grpc = 1; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. 
- GoogleGrpc google_grpc = 2; - } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. - repeated HeaderValue initial_metadata = 5; -} diff --git a/generated_api_shadow/envoy/api/v2/core/health_check.proto b/generated_api_shadow/envoy/api/v2/core/health_check.proto deleted file mode 100644 index bc4ae3e5c8666..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/health_check.proto +++ /dev/null @@ -1,308 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/event_service_config.proto"; -import "envoy/type/http.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health check] -// * Health checking :ref:`architecture overview `. -// * If health checking is configured for a cluster, additional statistics are emitted. They are -// documented :ref:`here `. - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. 
- HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. - DEGRADED = 5; -} - -// [#next-free-field: 23] -message HealthCheck { - // Describes the encoding of the payload bytes in the payload. - message Payload { - oneof payload { - option (validate.required) = true; - - // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] Binary payload. - bytes binary = 2; - } - } - - // [#next-free-field: 12] - message HttpHealthCheck { - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - string host = 1; - - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - string path = 2 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] HTTP specific payload. - Payload send = 3; - - // [#not-implemented-hide:] HTTP specific response. - Payload receive = 4; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster. See the :ref:`architecture overview - // ` for more information. - // - // .. attention:: - // - // This field has been deprecated in favor of `service_name_matcher` for better flexibility - // over matching with service-cluster name. - string service_name = 5 [deprecated = true]; - - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. 
For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. - repeated HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - repeated string request_headers_to_remove = 8; - - // If set, health checks will be made using http/2. - // Deprecated, use :ref:`codec_client_type - // ` instead. - bool use_http2 = 7 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - repeated type.Int64Range expected_statuses = 9; - - // Use specified application protocol for health checks. - type.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview - // ` for more information. - type.matcher.StringMatcher service_name_matcher = 11; - } - - message TcpHealthCheck { - // Empty payloads imply a connect-only health check. - Payload send = 1; - - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. - repeated Payload receive = 2; - } - - message RedisHealthCheck { - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; - } - - // `grpc.health.v1.Health - // `_-based - // healthcheck. See `gRPC doc `_ - // for details. - message GrpcHealthCheck { - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - string service_name = 1; - - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - string authority = 2; - } - - // Custom health check. - message CustomHealthCheck { - // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - // Health checks occur over the transport socket specified for the cluster. This implies that if a - // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - // - // This allows overriding the cluster TLS settings, just for health check connections. - message TlsOptions { - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. 
- repeated string alpn_protocols = 1; - } - - reserved 10; - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true - gt {} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; - - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; - - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; - - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Non-serving port for health checking. 
- google.protobuf.UInt32Value alt_port = 6; - - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; - - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; - - // TCP health check. - TcpHealthCheck tcp_health_check = 9; - - // gRPC health check. - GrpcHealthCheck grpc_health_check = 11; - - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } - - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. 
- // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; - - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventServiceConfig event_service = 22; - - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; - - // This allows overriding the cluster TLS settings, just for health check connections. 
- TlsOptions tls_options = 21; -} diff --git a/generated_api_shadow/envoy/api/v2/core/http_uri.proto b/generated_api_shadow/envoy/api/v2/core/http_uri.proto deleted file mode 100644 index cd1a0660e330a..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/http_uri.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP Service URI ] - -// Envoy external URI descriptor -message HttpUri { - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - string uri = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - oneof http_upstream_type { - option (validate.required) = true; - - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
- google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto deleted file mode 100644 index ae1a86424cf07..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ /dev/null @@ -1,297 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "ProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Protocol options] - -// [#not-implemented-hide:] -message TcpProtocolOptions { -} - -message UpstreamHttpProtocolOptions { - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - bool auto_sni = 1; - - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. - bool auto_san_validation = 2; -} - -// [#next-free-field: 6] -message HttpProtocolOptions { - // Action to take when Envoy receives client request with header names containing underscore - // characters. - // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore - // characters. 
- enum HeadersWithUnderscoresAction { - // Allow headers with underscores. This is the default behavior. - ALLOW = 0; - - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - REJECT_REQUEST = 1; - - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - DROP_HEADER = 2; - } - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. - // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - google.protobuf.Duration idle_timeout = 1; - - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - google.protobuf.Duration max_connection_duration = 3; - - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. 
Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - google.protobuf.Duration max_stream_duration = 4; - - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. - // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction headers_with_underscores_action = 5; -} - -// [#next-free-field: 6] -message Http1ProtocolOptions { - message HeaderKeyFormat { - message ProperCaseWords { - } - - oneof header_format { - option (validate.required) = true; - - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". - ProperCaseWords proper_case_words = 1; - } - } - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - google.protobuf.BoolValue allow_absolute_url = 1; - - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. 
There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - bool accept_http_10 = 2; - - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - string default_host_for_http_10 = 3; - - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat header_key_format = 4; - - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. - bool enable_trailers = 5; -} - -// [#next-free-field: 14] -message Http2ProtocolOptions { - // Defines a parameter to be sent in the SETTINGS frame. - // See `RFC7540, sec. 6.5.1 `_ for details. - message SettingsParameter { - // The 16 bit parameter identifier. - google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, - (validate.rules).message = {required: true} - ]; - - // The 32 bit parameter value. - google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; - } - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - google.protobuf.UInt32Value hpack_table_size = 1; - - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. 
- // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). - google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; - - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. - google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Allows proxying Websocket and other upgrades over H2 connect. - bool allow_connect = 5; - - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. - bool allow_metadata = 6; - - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). 
Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. - // [#comment:TODO: implement same limits for upstream outbound frames as well.] - google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. - // [#comment:TODO: implement same limits for upstream outbound frames as well.] - google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; - - // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + inbound_streams) - // - // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; - - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 1 + 2 * (inbound_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 - [(validate.rules).uint32 = {gte: 1}]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; - - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. 
SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. - repeated SettingsParameter custom_settings_parameters = 13; -} - -// [#not-implemented-hide:] -message GrpcProtocolOptions { - Http2ProtocolOptions http2_protocol_options = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/core/socket_option.proto b/generated_api_shadow/envoy/api/v2/core/socket_option.proto deleted file mode 100644 index 39678ad1b8bc6..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/socket_option.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "SocketOptionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Socket Option ] - -// Generic socket option message. This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. 
-// [#next-free-field: 7] -message SocketOption { - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } - - // An optional name to give this socket option for debugging, etc. - // Uniqueness is not required and no special meaning is assumed. - string description = 1; - - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - int64 level = 2; - - // The numeric name as passed to setsockopt - int64 name = 3; - - oneof value { - option (validate.required) = true; - - // Because many sockopts take an int value. - int64 int_value = 4; - - // Otherwise it's a byte buffer. - bytes buf_value = 5; - } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. 
- SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/discovery.proto b/generated_api_shadow/envoy/api/v2/discovery.proto deleted file mode 100644 index da2690f867fc3..0000000000000 --- a/generated_api_shadow/envoy/api/v2/discovery.proto +++ /dev/null @@ -1,234 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.discovery.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common discovery API components] - -// A DiscoveryRequest requests a set of versioned resources of the same type for -// a given Envoy node on some API. -// [#next-free-field: 7] -message DiscoveryRequest { - // The version_info provided in the request messages will be the version_info - // received with the most recent successfully processed response or empty on - // the first request. It is expected that no new request is sent after a - // response is received until the Envoy instance is ready to ACK/NACK the new - // configuration. ACK/NACK takes place by returning the new API config version - // as applied or the previous API config version respectively. Each type_url - // (see below) has an independent version associated with it. - string version_info = 1; - - // The node making the request. - core.Node node = 2; - - // List of resources to subscribe to, e.g. list of cluster names or a route - // configuration name. If this is empty, all resources for the API are - // returned. 
LDS/CDS may have empty resource_names, which will cause all - // resources for the Envoy instance to be returned. The LDS and CDS responses - // will then imply a number of resources that need to be fetched via EDS/RDS, - // which will be explicitly enumerated in resource_names. - repeated string resource_names = 3; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - // required for ADS. - string type_url = 4; - - // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - // discussion on version_info and the DiscoveryResponse nonce comment. This - // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - // or 2) the client has not yet accepted an update in this xDS stream (unlike - // delta, where it is populated only for new explicit ACKs). - string response_nonce = 5; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy - // internal exception related to the failure. It is only intended for consumption during manual - // debugging, the string provided is not guaranteed to be stable across Envoy versions. - google.rpc.Status error_detail = 6; -} - -// [#next-free-field: 7] -message DiscoveryResponse { - // The version of the response data. - string version_info = 1; - - // The response resources. These resources are typed and depend on the API being called. - repeated google.protobuf.Any resources = 2; - - // [#not-implemented-hide:] - // Canary is used to support two Envoy command line flags: - // - // * --terminate-on-canary-transition-failure. When set, Envoy is able to - // terminate if it detects that configuration is stuck at canary. Consider - // this example sequence of updates: - // - Management server applies a canary config successfully. 
- // - Management server rolls back to a production config. - // - Envoy rejects the new production config. - // Since there is no sensible way to continue receiving configuration - // updates, Envoy will then terminate and apply production config from a - // clean slate. - // * --dry-run-canary. When set, a canary response will never be applied, only - // validated via a dry run. - bool canary = 3; - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). - string type_url = 4; - - // For gRPC based subscriptions, the nonce provides a way to explicitly ack a - // specific DiscoveryResponse in a following DiscoveryRequest. Additional - // messages may have been sent by Envoy to the management server for the - // previous version on the stream prior to this DiscoveryResponse, that were - // unprocessed at response send time. The nonce allows the management server - // to ignore any further DiscoveryRequests for the previous version until a - // DiscoveryRequest bearing the nonce. The nonce is optional and is not - // required for non-stream based xDS implementations. - string nonce = 5; - - // [#not-implemented-hide:] - // The control plane instance that sent the response. - core.ControlPlane control_plane = 6; -} - -// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -// endpoint for Delta xDS. -// -// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a -// diff to the state of a xDS client. -// In Delta XDS there are per-resource versions, which allow tracking state at -// the resource granularity. -// An xDS Delta session is always in the context of a gRPC bidirectional -// stream. This allows the xDS server to keep track of the state of xDS clients -// connected to it. 
-// -// In Delta xDS the nonce field is required and used to pair -// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -// Optionally, a response message level system_version_info is present for -// debugging purposes only. -// -// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest -// can be either or both of: [1] informing the server of what resources the -// client has gained/lost interest in (using resource_names_subscribe and -// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from -// the server (using response_nonce, with presence of error_detail making it a NACK). -// Additionally, the first message (for a given type_url) of a reconnected gRPC stream -// has a third role: informing the server of the resources (and their versions) -// that the client already possesses, using the initial_resource_versions field. -// -// As with state-of-the-world, when multiple resource types are multiplexed (ADS), -// all requests/acknowledgments/updates are logically walled off by type_url: -// a Cluster ACK exists in a completely separate world from a prior Route NACK. -// In particular, initial_resource_versions being sent at the "start" of every -// gRPC stream actually entails a message for each type_url, each with its own -// initial_resource_versions. -// [#next-free-field: 8] -message DeltaDiscoveryRequest { - // The node making the request. - core.Node node = 1; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". - string type_url = 2; - - // DeltaDiscoveryRequests allow the client to add or remove individual - // resources to the set of tracked resources in the context of a stream. - // All resource names in the resource_names_subscribe list are added to the - // set of tracked resources and all resource names in the resource_names_unsubscribe - // list are removed from the set of tracked resources. 
- // - // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or - // resource_names_unsubscribe list simply means that no resources are to be - // added or removed to the resource list. - // *Like* state-of-the-world xDS, the server must send updates for all tracked - // resources, but can also send updates for resources the client has not subscribed to. - // - // NOTE: the server must respond with all resources listed in resource_names_subscribe, - // even if it believes the client has the most recent version of them. The reason: - // the client may have dropped them, but then regained interest before it had a chance - // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. - // - // These two fields can be set in any DeltaDiscoveryRequest, including ACKs - // and initial_resource_versions. - // - // A list of Resource names to add to the list of tracked resources. - repeated string resource_names_subscribe = 3; - - // A list of Resource names to remove from the list of tracked resources. - repeated string resource_names_unsubscribe = 4; - - // Informs the server of the versions of the resources the xDS client knows of, to enable the - // client to continue the same logical xDS session even in the face of gRPC stream reconnection. - // It will not be populated: [1] in the very first stream of a session, since the client will - // not yet have any resources, [2] in any message after the first in a stream (for a given - // type_url), since the server will already be correctly tracking the client's state. - // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) - // The map's keys are names of xDS resources known to the xDS client. - // The map's values are opaque resource versions. 
- map initial_resource_versions = 5; - - // When the DeltaDiscoveryRequest is a ACK or NACK message in response - // to a previous DeltaDiscoveryResponse, the response_nonce must be the - // nonce in the DeltaDiscoveryResponse. - // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. - string response_nonce = 6; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* - // provides the Envoy internal exception related to the failure. - google.rpc.Status error_detail = 7; -} - -// [#next-free-field: 7] -message DeltaDiscoveryResponse { - // The version of the response data (used for debugging). - string system_version_info = 1; - - // The response resources. These are typed resources, whose types must match - // the type_url field. - repeated Resource resources = 2; - - // field id 3 IS available! - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - string type_url = 4; - - // Resources names of resources that have be deleted and to be removed from the xDS Client. - // Removed resources for missing resources can be ignored. - repeated string removed_resources = 6; - - // The nonce provides a way for DeltaDiscoveryRequests to uniquely - // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - string nonce = 5; -} - -message Resource { - // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; - - // The aliases are a list of other names that this resource can go by. - repeated string aliases = 4; - - // The resource level version. It allows xDS to track the state of individual - // resources. - string version = 1; - - // The resource being tracked. 
- google.protobuf.Any resource = 2; -} diff --git a/generated_api_shadow/envoy/api/v2/eds.proto b/generated_api_shadow/envoy/api/v2/eds.proto deleted file mode 100644 index d757f17fc2f37..0000000000000 --- a/generated_api_shadow/envoy/api/v2/eds.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/endpoint.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "EdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: EDS] -// Endpoint discovery :ref:`architecture overview ` - -service EndpointDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.ClusterLoadAssignment"; - - // The resource_names field in DiscoveryRequest specifies a list of clusters - // to subscribe to updates for. - rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaEndpoints(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:endpoints"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message EdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint.proto deleted file mode 100644 index 70bac3c6c4f6c..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint.proto +++ /dev/null @@ -1,119 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/endpoint/endpoint_components.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "EndpointProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Endpoint configuration] -// Endpoint discovery :ref:`architecture overview ` - -// Each route from RDS will map to a single cluster or traffic split across -// clusters using weights expressed in the RDS WeightedCluster. -// -// With EDS, each cluster is treated independently from a LB perspective, with -// LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. The percentage of traffic -// for each endpoint is determined by both its load_balancing_weight, and the -// load_balancing_weight of its locality. First, a locality will be selected, -// then an endpoint within that locality will be chose based on its weight. -// [#next-free-field: 6] -message ClusterLoadAssignment { - // Load balancing policy settings. - // [#next-free-field: 6] - message Policy { - // [#not-implemented-hide:] - message DropOverload { - // Identifier for the policy specifying the drop. 
- string category = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Percentage of traffic that should be dropped for the category. - type.FractionalPercent drop_percentage = 2; - } - - reserved 1; - - // Action to trim the overall incoming traffic to protect the upstream - // hosts. This action allows protection in case the hosts are unable to - // recover from an outage, or unable to autoscale or unable to handle - // incoming traffic volume for any reason. - // - // At the client each category is applied one after the other to generate - // the 'actual' drop percentage on all outgoing traffic. For example: - // - // .. code-block:: json - // - // { "drop_overloads": [ - // { "category": "throttle", "drop_percentage": 60 } - // { "category": "lb", "drop_percentage": 50 } - // ]} - // - // The actual drop percentages applied to the traffic at the clients will be - // "throttle"_drop = 60% - // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. - // actual_outgoing_load = 20% // remaining after applying all categories. - // [#not-implemented-hide:] - repeated DropOverload drop_overloads = 2; - - // Priority levels and localities are considered overprovisioned with this - // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts - // multiplied by the overprovisioning factor drops below 100. - // With the default value 140(1.4), Envoy doesn't consider a priority level - // or a locality unhealthy until their percentage of healthy hosts drops - // below 72%. For example: - // - // .. code-block:: json - // - // { "overprovisioning_factor": 100 } - // - // Read more at :ref:`priority levels ` and - // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; - - // The max time until which the endpoints from this assignment can be used. 
- // If no new assignments are received before this time expires the endpoints - // are considered stale and should be marked unhealthy. - // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; - - // The flag to disable overprovisioning. If it is set to true, - // :ref:`overprovisioning factor - // ` will be ignored - // and Envoy will not perform graceful failover between priority levels or - // localities as endpoints become unhealthy. Otherwise Envoy will perform - // graceful failover as :ref:`overprovisioning factor - // ` suggests. - // [#not-implemented-hide:] - bool disable_overprovisioning = 5 [deprecated = true]; - } - - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // List of endpoints to load balance to. - repeated endpoint.LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - // [#not-implemented-hide:] - map named_endpoints = 5; - - // Load balancing policy settings. - Policy policy = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/endpoint/BUILD b/generated_api_shadow/envoy/api/v2/endpoint/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto deleted file mode 100644 index 247c9ae265a56..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.endpoint; - -import public "envoy/api/v2/endpoint/endpoint_components.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option java_outer_classname = "EndpointProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto b/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto deleted file mode 100644 index 78d45e2e08d06..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto +++ /dev/null @@ -1,148 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.endpoint; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option java_outer_classname = "EndpointComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Endpoints] - -// Upstream host identifier. -message Endpoint { - // The optional health check configuration. - message HealthCheckConfig { - // Optional alternative health check port value. 
- // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. - uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; - - // By default, the host header for L7 health checks is controlled by cluster level configuration - // (see: :ref:`host ` and - // :ref:`authority `). Setting this - // to a non-empty value allows overriding the cluster level configuration for a specific - // endpoint. - string hostname = 2; - } - - // The upstream host address. - // - // .. attention:: - // - // The form of host address depends on the given cluster type. For STATIC or EDS, - // it is expected to be a direct IP address (or something resolvable by the - // specified :ref:`resolver ` - // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, - // and will be resolved via DNS. - core.Address address = 1; - - // The optional health check configuration is used as configuration for the - // health checker to contact the health checked host. - // - // .. attention:: - // - // This takes into effect only for upstream clusters with - // :ref:`active health checking ` enabled. - HealthCheckConfig health_check_config = 2; - - // The hostname associated with this endpoint. This hostname is not used for routing or address - // resolution. If provided, it will be associated with the endpoint, and can be used for features - // that require a hostname, like - // :ref:`auto_host_rewrite `. - string hostname = 3; -} - -// An Endpoint that Envoy can route traffic to. -// [#next-free-field: 6] -message LbEndpoint { - // Upstream host identifier or a named reference. - oneof host_identifier { - Endpoint endpoint = 1; - - // [#not-implemented-hide:] - string endpoint_name = 5; - } - - // Optional health status when known and supplied by EDS server. 
- core.HealthStatus health_status = 2; - - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. - core.Metadata metadata = 3; - - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. The sum of the weights of all endpoints in the - // endpoint's locality must not exceed uint32_t maximal value (4294967295). - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; -} - -// A group of endpoints belonging to a Locality. -// One can have multiple LocalityLbEndpoints for a locality, but this is -// generally only done if the different groups need to have different load -// balancing weights or different priorities. -// [#next-free-field: 7] -message LocalityLbEndpoints { - // Identifies location of where the upstream hosts run. - core.Locality locality = 1; - - // The group of endpoints belonging to the locality specified. - repeated LbEndpoint lb_endpoints = 2; - - // Optional: Per priority/region/zone/sub_zone weight; at least 1. 
The load - // balancing weight for a locality is divided by the sum of the weights of all - // localities at the same priority level to produce the effective percentage - // of traffic for the locality. The sum of the weights of all localities at - // the same priority level must not exceed uint32_t maximal value (4294967295). - // - // Locality weights are only considered when :ref:`locality weighted load - // balancing ` is - // configured. These weights are ignored otherwise. If no weights are - // specified when locality weighted load balancing is enabled, the locality is - // assigned no load. - google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Optional: the priority for this LocalityLbEndpoints. If unspecified this will - // default to the highest priority (0). - // - // Under usual circumstances, Envoy will only select endpoints for the highest - // priority (0). In the event all endpoints for a particular priority are - // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the - // next highest priority group. - // - // Priorities should range from 0 (highest) to N (lowest) without skipping. - uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; - - // Optional: Per locality proximity value which indicates how close this - // locality is from the source locality. This value only provides ordering - // information (lower the value, closer it is to the source locality). - // This will be consumed by load balancing schemes that need proximity order - // to determine where to route the requests. 
- // [#not-implemented-hide:] - google.protobuf.UInt32Value proximity = 6; -} diff --git a/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto b/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto deleted file mode 100644 index 928aed6102df8..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.endpoint; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option java_outer_classname = "LoadReportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// These are stats Envoy reports to GLB every so often. Report frequency is -// defined by -// :ref:`LoadStatsResponse.load_reporting_interval`. -// Stats per upstream region/zone and optionally per subzone. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -// [#next-free-field: 9] -message UpstreamLocalityStats { - // Name of zone, region and optionally endpoint group these metrics were - // collected from. Zone and region names could be empty if unknown. - core.Locality locality = 1; - - // The total number of requests successfully completed by the endpoints in the - // locality. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint, - // aggregated over all endpoints in the locality. 
- uint64 total_error_requests = 4; - - // The total number of requests that were issued by this Envoy since - // the last report. This information is aggregated over all the - // upstream endpoints in the locality. - uint64 total_issued_requests = 8; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; - - // Endpoint granularity stats information for this locality. This information - // is populated if the Server requests it by setting - // :ref:`LoadStatsResponse.report_endpoint_granularity`. - repeated UpstreamEndpointStats upstream_endpoint_stats = 7; - - // [#not-implemented-hide:] The priority of the endpoint group these metrics - // were collected from. - uint32 priority = 6; -} - -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -// [#next-free-field: 8] -message UpstreamEndpointStats { - // Upstream host address. - core.Address address = 1; - - // Opaque and implementation dependent metadata of the - // endpoint. Envoy will pass this directly to the management server. - google.protobuf.Struct metadata = 6; - - // The total number of requests successfully completed by the endpoints in the - // locality. These include non-5xx responses for HTTP, where errors - // originate at the client and the endpoint responded successfully. For gRPC, - // the grpc-status values are those not covered by total_error_requests below. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests for this endpoint. - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint. - // For HTTP these are responses with 5xx status codes and for gRPC the - // grpc-status values: - // - // - DeadlineExceeded - // - Unimplemented - // - Internal - // - Unavailable - // - Unknown - // - DataLoss - uint64 total_error_requests = 4; - - // The total number of requests that were issued to this endpoint - // since the last report. 
A single TCP connection, HTTP or gRPC - // request or stream is counted as one request. - uint64 total_issued_requests = 7; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; -} - -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -message EndpointLoadMetricStats { - // Name of the metric; may be empty. - string metric_name = 1; - - // Number of calls that finished and included this metric. - uint64 num_requests_finished_with_metric = 2; - - // Sum of metric values across all calls that finished with this metric for - // load_reporting_interval. - double total_metric_value = 3; -} - -// Per cluster load stats. Envoy reports these stats a management server in a -// :ref:`LoadStatsRequest` -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -// Next ID: 7 -// [#next-free-field: 7] -message ClusterStats { - message DroppedRequests { - // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Total number of deliberately dropped requests for the category. - uint64 dropped_count = 2; - } - - // The name of the cluster. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The eds_cluster_config service_name of the cluster. - // It's possible that two clusters send the same service_name to EDS, - // in that case, the management server is supposed to do aggregation on the load reports. - string cluster_service_name = 6; - - // Need at least one. - repeated UpstreamLocalityStats upstream_locality_stats = 2 - [(validate.rules).repeated = {min_items: 1}]; - - // Cluster-level stats such as total_successful_requests may be computed by - // summing upstream_locality_stats. In addition, below there are additional - // cluster-wide stats. - // - // The total number of dropped requests. This covers requests - // deliberately dropped by the drop_overload policy and circuit breaking. 
- uint64 total_dropped_requests = 3; - - // Information about deliberately dropped requests for each category specified - // in the DropOverload policy. - repeated DroppedRequests dropped_requests = 5; - - // Period over which the actual load report occurred. This will be guaranteed to include every - // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - // and the *LoadStatsResponse* message sent from the management server, this may be longer than - // the requested load reporting interval in the *LoadStatsResponse*. - google.protobuf.Duration load_report_interval = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/lds.proto b/generated_api_shadow/envoy/api/v2/lds.proto deleted file mode 100644 index 01d9949777dd8..0000000000000 --- a/generated_api_shadow/envoy/api/v2/lds.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/listener.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "LdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listener] -// Listener :ref:`configuration overview ` - -// The Envoy instance initiates an RPC at startup to discover a list of -// listeners. Updates are delivered via streaming from the LDS server and -// consist of a complete update of all listeners. Existing connections will be -// allowed to drain from listeners that are no longer present. 
-service ListenerDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.Listener"; - - rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:listeners"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message LdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/listener.proto b/generated_api_shadow/envoy/api/v2/listener.proto deleted file mode 100644 index 1fdd202de42a9..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener.proto +++ /dev/null @@ -1,248 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/socket_option.proto"; -import "envoy/api/v2/listener/listener_components.proto"; -import "envoy/api/v2/listener/udp_listener_config.proto"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; -import "envoy/config/listener/v2/api_listener.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listener configuration] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 23] -message 
Listener { - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated in v2, all Listeners will bind to their port. An - // additional filter chain must be created for every original destination - // port this listener may redirect to in v2, with the original port - // specified in the FilterChainMatch destination_port field. - // - // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] - google.protobuf.BoolValue bind_to_port = 1; - } - - // Configuration for listener connection balancing. - message ConnectionBalanceConfig { - // A connection balancer implementation that does exact balancing. This means that a lock is - // held during balancing so that connection counts are nearly exactly balanced between worker - // threads. This is "nearly" exact in the sense that a connection might close in parallel thus - // making the counts incorrect, but this should be rectified on the next accept. This balancer - // sacrifices accept throughput for accuracy and should be used when there are a small number of - // connections that rarely cycle (e.g., service mesh gRPC egress). - message ExactBalance { - } - - oneof balance_type { - option (validate.required) = true; - - // If specified, the listener will use the exact connection balancer. 
- ExactBalance exact_balance = 1; - } - } - - reserved 14; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. - string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.Address address = 2 [(validate.rules).message = {required: true}]; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated listener.FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - // - // .. attention:: - // - // This field is deprecated. Use :ref:`an original_dst ` - // :ref:`listener filter ` instead. - // - // Note that hand off to another listener is *NOT* performed without this flag. Once - // :ref:`FilterChainMatch ` is implemented this flag - // will be removed, as filter chain matching can be used to select a filter chain based on the - // restored destination address. 
- google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - - // Listener metadata. - core.Metadata metadata = 6; - - // [#not-implemented-hide:] - DeprecatedV1 deprecated_v1 = 7; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. These filters are run before any in - // :ref:`filter_chains `. Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:`UDP - // `. - // UDP listeners currently support a single filter. - repeated listener.ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. 
- // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. - // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). 
- // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. - // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. - // This property is required on Windows for listeners using the original destination filter, - // see :ref:`Original Destination `. - core.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:`UDP - // `, this field specifies the actual udp - // listener to create, i.e. :ref:`udp_listener_name - // ` = "raw_udp_listener" for - // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". - listener.UdpListenerConfig udp_listener_config = 18; - - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // - // .. note:: - // - // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - // not LDS. 
- // - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - config.listener.v2.ApiListener api_listener = 19; - - // The listener's connection balancer configuration, currently only applicable to TCP listeners. - // If no configuration is specified, Envoy will not attempt to balance active connections between - // worker threads. - ConnectionBalanceConfig connection_balance_config = 20; - - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - // create one socket for each worker thread. This makes inbound connections - // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. - // - // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - // (see `3rd paragraph in 'soreuseport' commit message - // `_). - // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - // `_. - bool reuse_port = 21; - - // Configuration for :ref:`access logs ` - // emitted by this listener. - repeated config.filter.accesslog.v2.AccessLog access_log = 22; -} diff --git a/generated_api_shadow/envoy/api/v2/listener/BUILD b/generated_api_shadow/envoy/api/v2/listener/BUILD deleted file mode 100644 index ea23dff77c22e..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/listener/listener.proto b/generated_api_shadow/envoy/api/v2/listener/listener.proto deleted file mode 100644 index 273b29cb5dd30..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/listener.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import public "envoy/api/v2/listener/listener_components.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; diff --git a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto deleted file mode 100644 index 08738962c5eee..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto +++ /dev/null @@ -1,287 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import "envoy/api/v2/auth/tls.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/type/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "ListenerComponentsProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; -option 
(udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -message Filter { - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 4; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. 
-// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 13] -message FilterChainMatch { - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - LOCAL = 1 [(udpa.annotations.enum_value_migrate).rename = "SAME_IP_OR_LOOPBACK"]; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. 
- repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. - repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. - // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. 
- // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. - repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -// [#next-free-field: 8] -message FilterChain { - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // The TLS context for this filter chain. - // - // .. attention:: - // - // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - // set, `transport_socket` takes priority. - auth.DownstreamTlsContext tls_context = 2 [deprecated = true]; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. - repeated Filter filters = 3; - - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; - - // [#not-implemented-hide:] filter chain metadata. - core.Metadata metadata = 5; - - // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. 
- // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.TransportSocket transport_socket = 6; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; -} - -// Listener filter chain match configuration. This is a recursive structure which allows complex -// nested match configurations to be built using various logical operators. -// -// Examples: -// -// * Matches if the destination port is 3306. -// -// .. code-block:: yaml -// -// destination_port_range: -// start: 3306 -// end: 3307 -// -// * Matches if the destination port is 3306 or 15000. -// -// .. code-block:: yaml -// -// or_match: -// rules: -// - destination_port_range: -// start: 3306 -// end: 3307 -// - destination_port_range: -// start: 15000 -// end: 15001 -// -// [#next-free-field: 6] -message ListenerFilterChainMatchPredicate { - // A set of match configurations used for logical operations. - message MatchSet { - // The list of rules that make up the set. - repeated ListenerFilterChainMatchPredicate rules = 1 - [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - ListenerFilterChainMatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // Match destination port. 
Particularly, the match evaluation must use the recovered local port if - // the owning listener filter is after :ref:`an original_dst listener filter `. - type.Int32Range destination_port_range = 5; - } -} - -message ListenerFilter { - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. - // See the supported filters for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. - ListenerFilterChainMatchPredicate filter_disabled = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/listener/quic_config.proto b/generated_api_shadow/envoy/api/v2/listener/quic_config.proto deleted file mode 100644 index 2a4616bb09c99..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/quic_config.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "QuicConfigProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: QUIC listener Config] - -// Configuration specific to the QUIC protocol. 
-// Next id: 4 -message QuicProtocolOptions { - // Maximum number of streams that the client can negotiate per connection. 100 - // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; - - // Maximum number of milliseconds that connection will be alive when there is - // no network activity. 300000ms if not specified. - google.protobuf.Duration idle_timeout = 2; - - // Connection timeout in milliseconds before the crypto handshake is finished. - // 20000ms if not specified. - google.protobuf.Duration crypto_handshake_timeout = 3; -} diff --git a/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto b/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto deleted file mode 100644 index d4d29531f3aaa..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "UdpListenerConfigProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: UDP Listener Config] -// Listener :ref:`configuration overview ` - -message UdpListenerConfig { - // Used to look up UDP listener factory, matches "raw_udp_listener" or - // "quic_listener" to create a specific udp listener. - // If not specified, treat as "raw_udp_listener". - string udp_listener_name = 1; - - // Used to create a specific listener factory. To some factory, e.g. - // "raw_udp_listener", config is not needed. 
- oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -message ActiveRawUdpListenerConfig { -} diff --git a/generated_api_shadow/envoy/api/v2/ratelimit/BUILD b/generated_api_shadow/envoy/api/v2/ratelimit/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/api/v2/ratelimit/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto b/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto deleted file mode 100644 index 5ac72c69a6fbb..0000000000000 --- a/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.ratelimit; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.ratelimit"; -option java_outer_classname = "RatelimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common rate limit components] - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. 
The -// configuration supplies a default limit for the *remote_address* key. If there is a desire to -// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"] -// -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"] -// -// What it does: Limits all traffic for an authenticated client "foo" -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. -message RateLimitDescriptor { - message Entry { - // Descriptor key. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Descriptor value. - string value = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // Descriptor entries. 
- repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/api/v2/rds.proto b/generated_api_shadow/envoy/api/v2/rds.proto deleted file mode 100644 index faa5fdcf31942..0000000000000 --- a/generated_api_shadow/envoy/api/v2/rds.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/route.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "RdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: RDS] - -// The resource_names field in DiscoveryRequest specifies a route configuration. -// This allows an Envoy configuration with multiple HTTP listeners (and -// associated HTTP connection manager filters) to use different route -// configurations. Each listener will bind its HTTP connection manager filter to -// a route table via this identifier. -service RouteDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.RouteConfiguration"; - - rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:routes"; - option (google.api.http).body = "*"; - } -} - -// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for -// a given RouteConfiguration. 
If VHDS is configured a virtual host list update will be triggered -// during the processing of an HTTP request if a route for the request cannot be resolved. The -// :ref:`resource_names_subscribe ` -// field contains a list of virtual host names or aliases to track. The contents of an alias would -// be the contents of a *host* or *authority* header used to make an http request. An xDS server -// will match an alias to a virtual host based on the content of :ref:`domains' -// ` field. The *resource_names_unsubscribe* field -// contains a list of virtual host names that have been :ref:`unsubscribed -// ` from the routing table associated with the RouteConfiguration. -service VirtualHostDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.route.VirtualHost"; - - rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message RdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/route.proto b/generated_api_shadow/envoy/api/v2/route.proto deleted file mode 100644 index 549f134a7f439..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route.proto +++ /dev/null @@ -1,113 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// [#next-free-field: 11] -message RouteConfiguration { - // The name of the route configuration. For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. - repeated route.VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. 
- Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. 
- repeated string request_headers_to_remove = 8 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. Defaults to false. - // - // [#next-major-version: In the v3 API, this will default to true.] - bool most_specific_header_mutations_wins = 10; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). - google.protobuf.BoolValue validate_clusters = 7; -} - -message Vhds { - // Configuration source specifier for VHDS. - core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/route/BUILD b/generated_api_shadow/envoy/api/v2/route/BUILD deleted file mode 100644 index 3d4e6acfeac17..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/tracing/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/route/route.proto b/generated_api_shadow/envoy/api/v2/route/route.proto deleted file mode 100644 index ec13e9e5c801b..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route/route.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.route; - -import public "envoy/api/v2/route/route_components.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.route"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto deleted file mode 100644 index d73fbb8674c90..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ /dev/null @@ -1,1628 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.route; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/regex.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/range.proto"; -import "envoy/type/tracing/v2/custom_tag.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.route"; -option java_outer_classname = "RouteComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package 
= "envoy.config.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP route components] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 21] -message VirtualHost { - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. 
- // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - // - // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. - repeated string domains = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} - }]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. - repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. - repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. 
Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map per_filter_config = 12 [deprecated = true]; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - // - // [#next-major-version: rename to include_attempt_count_in_request.] 
- bool include_request_attempt_count = 14; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the downstream response. Setting this option will cause the router to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the downstream - // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_attempt_count_in_response = 19; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that setting a route level entry - // will take precedence over this config and it'll be treated independently (e.g.: values are not - // inherited). :ref:`Retry policy ` should not be - // set if this field is used. - google.protobuf.Any retry_policy_typed_config = 20; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum - // value of this and the listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; -} - -// A filter-defined action type. 
-message FilterAction { - google.protobuf.Any action = 1; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. -// [#next-free-field: 18] -message Route { - reserved 6; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - - // [#not-implemented-hide:] - // If true, a filter will define the action (e.g., it could dynamically generate the - // RouteAction). - FilterAction filter_action = 17; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. - // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. - core.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map per_filter_config = 8 [deprecated = true]; - - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. 
The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_api_msg_route.VirtualHost` and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. - repeated string request_headers_to_remove = 12; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. - repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. 
- google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -message WeightedCluster { - // [#next-free-field: 11] - message ClusterWeight { - reserved 7; - - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in - // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. - core.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - repeated string request_headers_to_remove = 9; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - repeated string response_headers_to_remove = 6; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map per_filter_config = 8 [deprecated = true]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. 
- map typed_per_filter_config = 10; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. - string runtime_key_prefix = 2; -} - -// [#next-free-field: 12] -message RouteMatch { - message GrpcRouteMatchOptions { - } - - message TlsContextMatchOptions { - // If specified, the route will match against whether or not a certificate is presented. - // If not specified, certificate presentation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue presented = 1; - - // If specified, the route will match against whether or not a certificate is validated. - // If not specified, certificate validation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue validated = 2; - } - - reserved 5; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. 
- string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. The regex grammar is defined `here - // `_. - // - // Examples: - // - // * The regex ``/b[io]t`` matches the path */bit* - // * The regex ``/b[io]t`` matches the path */bot* - // * The regex ``/b[io]t`` does not match the path */bite* - // * The regex ``/b[io]t`` does not match the path */bit/bot* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - string regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. 
The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - } - - // Indicates that prefix/path matching should be case sensitive. The default - // is true. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). 
- repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - GrpcRouteMatchOptions grpc = 8; - - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; -} - -// [#next-free-field: 12] -message CorsPolicy { - // Specifies the origins that will be allowed to do CORS requests. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match`. - repeated string allow_origin = 1 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies regex patterns that match allowed origins. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for - // use with untrusted input in all cases. - repeated string allow_origin_regex = 8 - [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. 
- string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. - google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`filter_enabled` field instead. - google.protobuf.BoolValue enabled = 7 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.RuntimeFractionalPercent filter_enabled = 9; - } - - // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - // enforced. - // - // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - // fields have to explicitly disable the filter in order for this setting to take effect. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. - core.RuntimeFractionalPercent shadow_enabled = 10; -} - -// [#next-free-field: 34] -message RouteAction { - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. 
- SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // Configures :ref:`internal redirect ` behavior. - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` - // field instead. Mirroring occurs if both this and - // ` - // are not set. - string runtime_key = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // If not specified, all requests to the target cluster will be mirrored. 
- // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - core.RuntimeFractionalPercent runtime_fraction = 3; - - // Determines if the trace span should be sampled. Defaults to true. - google.protobuf.BoolValue trace_sampled = 4; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - // [#next-free-field: 7] - message HashPolicy { - message Header { - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - // The name of the cookie that will be used to obtain the hash key. 
If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - // Hash on source IP address. - bool source_ip = 1; - } - - message QueryParameter { - // The name of the URL query parameter that will be used to obtain the hash - // key. If the parameter is not present, no hash will be produced. Query - // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - message FilterState { - // The name of the Object in the per-request filterState, which is an - // Envoy::Http::Hashable object. If there is no data associated with the key, - // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - - // Query parameter hash policy. - QueryParameter query_parameter = 5; - - // Filter state hash policy. - FilterState filter_state = 6; - } - - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. 
- // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:`upgrade_configs - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - } - - reserved 12, 18, 19, 16, 22, 21; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. 
- string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of *prefix_rewrite* or - // :ref:`regex_rewrite ` - // may be specified. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. 
code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite ` - // or *regex_rewrite* may be specified. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. 
- type.matcher.RegexMatchAndSubstitute regex_rewrite = 32; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite = 6 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}, - (udpa.annotations.field_migrate).rename = "host_rewrite_literal" - ]; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string auto_host_rewrite_header = 29 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).rename = "host_rewrite_header" - ]; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. 
- google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - RetryPolicy retry_policy = 9; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that if this is set, it'll take - // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - // most internal one becomes the enforced policy). :ref:`Retry policy ` - // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has a request mirroring policy. - // - // .. attention:: - // This field has been deprecated in favor of `request_mirror_policies` which supports one or - // more mirroring policies. 
- RequestMirrorPolicy request_mirror_policy = 10 [deprecated = true]; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; - - // Optionally specifies the :ref:`routing priority `. - core.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. - CorsPolicy cors = 17; - - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. 
If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - // - // .. note:: - // - // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - // precedence over `grpc-timeout header `_, when - // both are present. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). 
- google.protobuf.Duration grpc_timeout_offset = 28; - - repeated UpgradeConfig upgrade_configs = 25; - - InternalRedirectAction internal_redirect_action = 26; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; -} - -// HTTP retry :ref:`architecture overview `. -// [#next-free-field: 11] -message RetryPolicy { - message RetryPriority { - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - message RetryHostPredicate { - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - message RetryBackOff { - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. 
- google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - // - // .. note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. 
If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. - RetryBackOff retry_back_off = 8; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. - // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. 
- // [#not-implemented-hide:] - type.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout is hit. - // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. - // The first request to complete successfully will be the one returned to the caller. - // - // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. - // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client - // if there are no more retries left. - // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. - // - // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least - // one error code and specifies a maximum number of retries. - // - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -// [#next-free-field: 9] -message RedirectAction { - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". 
- bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The port value of the URL will be swapped with this value. - uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - // Please note that query string in path_redirect will override the - // request's query string and will not be stripped. - // - // For example, let's say we have the following routes: - // - // - match: { path: "/old-path-1" } - // redirect: { path_redirect: "/new-path-1" } - // - match: { path: "/old-path-2" } - // redirect: { path_redirect: "/new-path-2", strip-query: "true" } - // - match: { path: "/old-path-3" } - // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } - // - // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" - // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" - // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). 
- RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or - // :ref:`envoy_api_msg_route.VirtualHost`. - core.DataSource body = 2; -} - -message Decorator { - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Whether the decorated details should be propagated to the other party. The default is true. - google.protobuf.BoolValue propagate = 2; -} - -message Tracing { - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. 
This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.FractionalPercent overall_sampling = 3; - - // A list of custom tags with unique tag name to create tags for the active span. - // It will take effect after merging with the :ref:`corresponding configuration - // ` - // configured in the HTTP connection manager. If two tags with the same name are configured - // each in the HTTP connection manager and the route level, the one configured here takes - // priority. - repeated type.tracing.v2.CustomTag custom_tags = 4; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. 
-// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - // Specifies a regex pattern to use for matching requests. The entire path of the request - // must match the regex. The regex grammar used is defined `here - // `_. - // - // Examples: - // - // * The regex ``/rides/\d+`` matches the path */rides/0* - // * The regex ``/rides/\d+`` matches the path */rides/123* - // * The regex ``/rides/\d+`` does not match the path */rides/123/456* - // - // .. attention:: - // This field has been deprecated in favor of `headers` as it is not safe for use with - // untrusted input in all cases. - string pattern = 1 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // Specifies a list of header matchers to use for matching requests. Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. - repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Optionally specifies the HTTP method to match on. For example GET, PUT, - // etc. - // - // .. attention:: - // This field has been deprecated in favor of `headers`. - core.RequestMethod method = 3 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; -} - -// Global rate limiting :ref:`architecture overview `. 
-message RateLimit { - // [#next-free-field: 7] - message Action { - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; - - // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - // The value to use in the descriptor entry. 
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). - repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. - SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. - RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - } - } - - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. 
- google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] -// [#next-free-field: 12] -message HeaderMatcher { - reserved 2, 3; - - // Specifies the name of the header in the request. - string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. 
- string exact_match = 4; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. The regex grammar used in the value field is defined - // `here `_. - // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use - // with untrusted input in all cases. - string regex_match = 5 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. - type.matcher.RegexMatcher safe_regex_match = 11; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.Int64Range range_match = 6; - - // If specified, header match will be performed based on whether the header is in the - // request. 
- bool present_match = 7; - - // If specified, header match will be performed based on the prefix of the header value. - // Note: empty prefix is not allowed, please use present_match instead. - // - // Examples: - // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, header match will be performed based on the suffix of the header value. - // Note: empty suffix is not allowed, please use present_match instead. - // - // Examples: - // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; - } - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. - bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -// [#next-free-field: 7] -message QueryParameterMatcher { - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; - - // Specifies the value of the key. If the value is absent, a request - // that contains the key in its query string will match, whether the - // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") - // - // ..attention:: - // This field is deprecated. Use an `exact` match inside the `string_match` field. - string value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies whether the query parameter value is a regular expression. - // Defaults to false. 
The entire query parameter value (i.e., the part to - // the right of the equals sign in "key=value") must match the regex. - // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. - // - // ..attention:: - // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. - google.protobuf.BoolValue regex = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } -} diff --git a/generated_api_shadow/envoy/api/v2/scoped_route.proto b/generated_api_shadow/envoy/api/v2/scoped_route.proto deleted file mode 100644 index 0841bd08723c5..0000000000000 --- a/generated_api_shadow/envoy/api/v2/scoped_route.proto +++ /dev/null @@ -1,109 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "ScopedRouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP scoped routing configuration] -// * Routing :ref:`architecture overview ` - -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). 
-// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... -// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -message ScopedRouteConfiguration { - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - message Fragment { - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. 
- repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated - // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/srds.proto b/generated_api_shadow/envoy/api/v2/srds.proto deleted file mode 100644 index 0edb99a1eccbb..0000000000000 --- a/generated_api_shadow/envoy/api/v2/srds.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/scoped_route.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "SrdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: SRDS] -// * Routing :ref:`architecture overview ` - -// The Scoped Routes Discovery Service (SRDS) API distributes -// :ref:`ScopedRouteConfiguration` -// resources. Each ScopedRouteConfiguration resource represents a "routing -// scope" containing a mapping that allows the HTTP connection manager to -// dynamically assign a routing table (specified via a -// :ref:`RouteConfiguration` message) to each -// HTTP request. 
-service ScopedRoutesDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.ScopedRouteConfiguration"; - - rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:scoped-routes"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message SrdsDummy { -} diff --git a/generated_api_shadow/envoy/config/README.md b/generated_api_shadow/envoy/config/README.md deleted file mode 100644 index 279bd7c2e8525..0000000000000 --- a/generated_api_shadow/envoy/config/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration. - -Visibility should be constrained to none or `//envoy/config/bootstrap/v2` by default. diff --git a/generated_api_shadow/envoy/config/accesslog/v2/BUILD b/generated_api_shadow/envoy/config/accesslog/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/accesslog/v2/als.proto b/generated_api_shadow/envoy/config/accesslog/v2/als.proto deleted file mode 100644 index 5b4106af106ed..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v2/als.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.grpc.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Configuration for the built-in *envoy.access_loggers.http_grpc* -// :ref:`AccessLog `. This configuration will -// populate :ref:`StreamAccessLogsMessage.http_logs -// `. -// [#extension: envoy.access_loggers.http_grpc] -message HttpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers - // `. - repeated string additional_request_headers_to_log = 2; - - // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers - // `. - repeated string additional_response_headers_to_log = 3; - - // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers - // `. 
- repeated string additional_response_trailers_to_log = 4; -} - -// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will -// populate *StreamAccessLogsMessage.tcp_logs*. -// [#extension: envoy.access_loggers.tcp_grpc] -message TcpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; -} - -// Common configuration for gRPC access logs. -// [#next-free-field: 6] -message CommonGrpcAccessLogConfig { - // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier - // `. This allows the - // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The gRPC service for the access log service. - api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it - // to zero effectively disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 4; - - // Additional filter state objects to log in :ref:`filter_state_objects - // `. - // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. 
- repeated string filter_state_objects_to_log = 5; -} diff --git a/generated_api_shadow/envoy/config/accesslog/v2/file.proto b/generated_api_shadow/envoy/config/accesslog/v2/file.proto deleted file mode 100644 index 9b8671c81358e..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v2/file.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v2; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -option java_outer_classname = "FileProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.file.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: File access log] -// [#extension: envoy.access_loggers.file] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* -// AccessLog. -message FileAccessLog { - // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof access_log_format { - // Access log :ref:`format string`. - // Envoy supports :ref:`custom access log formats ` as well as a - // :ref:`default format `. - string format = 2; - - // Access log :ref:`format dictionary`. All values - // are rendered as strings. - google.protobuf.Struct json_format = 3; - - // Access log :ref:`format dictionary`. Values are - // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may - // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the - // documentation for a specific command operator for details. 
- google.protobuf.Struct typed_json_format = 4; - } -} diff --git a/generated_api_shadow/envoy/config/accesslog/v3/BUILD b/generated_api_shadow/envoy/config/accesslog/v3/BUILD deleted file mode 100644 index af60e4e1966f5..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v3/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto deleted file mode 100644 index 2161f80478c23..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ /dev/null @@ -1,327 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v3"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common access log types] - -message AccessLog { - option (udpa.annotations.versioning).previous_message_type = - 
"envoy.config.filter.accesslog.v2.AccessLog"; - - // The name of the access log extension to instantiate. - // The name must match one of the compiled in loggers. - // See the :ref:`extensions listed in typed_config below ` for the default list of available loggers. - string name = 1; - - // Filter which is used to determine if the access log needs to be written. - AccessLogFilter filter = 2; - - // Custom configuration that must be set according to the access logger extension being instantiated. - // [#extension-category: envoy.access_loggers] - oneof config_type { - google.protobuf.Any typed_config = 4; - - google.protobuf.Struct hidden_envoy_deprecated_config = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// [#next-free-field: 13] -message AccessLogFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.AccessLogFilter"; - - oneof filter_specifier { - option (validate.required) = true; - - // Status code filter. - StatusCodeFilter status_code_filter = 1; - - // Duration filter. - DurationFilter duration_filter = 2; - - // Not health check filter. - NotHealthCheckFilter not_health_check_filter = 3; - - // Traceable filter. - TraceableFilter traceable_filter = 4; - - // Runtime filter. - RuntimeFilter runtime_filter = 5; - - // And filter. - AndFilter and_filter = 6; - - // Or filter. - OrFilter or_filter = 7; - - // Header filter. - HeaderFilter header_filter = 8; - - // Response flag filter. - ResponseFlagFilter response_flag_filter = 9; - - // gRPC status filter. - GrpcStatusFilter grpc_status_filter = 10; - - // Extension filter. - ExtensionFilter extension_filter = 11; - - // Metadata Filter - MetadataFilter metadata_filter = 12; - } -} - -// Filter on an integer comparison. 
-message ComparisonFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.ComparisonFilter"; - - enum Op { - // = - EQ = 0; - - // >= - GE = 1; - - // <= - LE = 2; - } - - // Comparison operator. - Op op = 1 [(validate.rules).enum = {defined_only: true}]; - - // Value to compare against. - core.v3.RuntimeUInt32 value = 2; -} - -// Filters on HTTP response/status code. -message StatusCodeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.StatusCodeFilter"; - - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters on total request duration in milliseconds. -message DurationFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.DurationFilter"; - - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters for requests that are not health check requests. A health check -// request is marked by the health check filter. -message NotHealthCheckFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.NotHealthCheckFilter"; -} - -// Filters for requests that are traceable. See the tracing overview for more -// information on how a request becomes traceable. -message TraceableFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.TraceableFilter"; -} - -// Filters for random sampling of requests. -message RuntimeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.RuntimeFilter"; - - // Runtime key to get an optional overridden numerator for use in the - // *percent_sampled* field. If found in runtime, this value will replace the - // default numerator. 
- string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; - - // The default sampling percentage. If not specified, defaults to 0% with - // denominator of 100. - type.v3.FractionalPercent percent_sampled = 2; - - // By default, sampling pivots on the header - // :ref:`x-request-id` being - // present. If :ref:`x-request-id` - // is present, the filter will consistently sample across multiple hosts based - // on the runtime key value and the value extracted from - // :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will - // randomly sample based on the runtime key value alone. - // *use_independent_randomness* can be used for logging kill switches within - // complex nested :ref:`AndFilter - // ` and :ref:`OrFilter - // ` blocks that are easier to - // reason about from a probability perspective (i.e., setting to true will - // cause the filter to behave like an independent random variable when - // composed within logical operator filters). - bool use_independent_randomness = 3; -} - -// Performs a logical “and” operation on the result of each filter in filters. -// Filters are evaluated sequentially and if one of them returns false, the -// filter returns false immediately. -message AndFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.AndFilter"; - - repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// Performs a logical “or” operation on the result of each individual filter. -// Filters are evaluated sequentially and if one of them returns true, the -// filter returns true immediately. -message OrFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.OrFilter"; - - repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; -} - -// Filters requests based on the presence or value of a request header. 
-message HeaderFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.HeaderFilter"; - - // Only requests with a header which matches the specified HeaderMatcher will - // pass the filter check. - route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; -} - -// Filters requests that received responses with an Envoy response flag set. -// A list of the response flags can be found -// in the access log formatter -// :ref:`documentation`. -message ResponseFlagFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; - - // Only responses with the any of the flags listed in this field will be - // logged. This field is optional. If it is not specified, then any response - // flag will pass the filter check. - repeated string flags = 1 [(validate.rules).repeated = { - items { - string { - in: "LH" - in: "UH" - in: "UT" - in: "LR" - in: "UR" - in: "UF" - in: "UC" - in: "UO" - in: "NR" - in: "DI" - in: "FI" - in: "RL" - in: "UAEX" - in: "RLSE" - in: "DC" - in: "URX" - in: "SI" - in: "IH" - in: "DPE" - in: "UMSDR" - in: "RFCF" - in: "NFCF" - in: "DT" - in: "UPE" - in: "NC" - in: "OM" - } - } - }]; -} - -// Filters gRPC requests based on their response status. If a gRPC status is not -// provided, the filter will infer the status from the HTTP status code. 
-message GrpcStatusFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; - - enum Status { - OK = 0; - CANCELED = 1; - UNKNOWN = 2; - INVALID_ARGUMENT = 3; - DEADLINE_EXCEEDED = 4; - NOT_FOUND = 5; - ALREADY_EXISTS = 6; - PERMISSION_DENIED = 7; - RESOURCE_EXHAUSTED = 8; - FAILED_PRECONDITION = 9; - ABORTED = 10; - OUT_OF_RANGE = 11; - UNIMPLEMENTED = 12; - INTERNAL = 13; - UNAVAILABLE = 14; - DATA_LOSS = 15; - UNAUTHENTICATED = 16; - } - - // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - - // If included and set to true, the filter will instead block all responses - // with a gRPC status or inferred gRPC status enumerated in statuses, and - // allow all other responses. - bool exclude = 2; -} - -// Filters based on matching dynamic metadata. -// If the matcher path and key correspond to an existing key in dynamic -// metadata, the request is logged only if the matcher value is equal to the -// metadata value. If the matcher path and key *do not* correspond to an -// existing key in dynamic metadata, the request is logged only if -// match_if_key_not_found is "true" or unset. -message MetadataFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.MetadataFilter"; - - // Matcher to check metadata for specified value. For example, to match on the - // access_log_hint metadata, set the filter to "envoy.common" and the path to - // "access_log_hint", and the value to "true". - type.matcher.v3.MetadataMatcher matcher = 1; - - // Default result if the key does not exist in dynamic metadata: if unset or - // true, then log; if false, then don't log. - google.protobuf.BoolValue match_if_key_not_found = 2; -} - -// Extension filter is statically registered at runtime. 
-message ExtensionFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.ExtensionFilter"; - - // The name of the filter implementation to instantiate. The name must - // match a statically registered filter. - string name = 1; - - // Custom configuration that depends on the filter being instantiated. - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/BUILD b/generated_api_shadow/envoy/config/bootstrap/v2/BUILD deleted file mode 100644 index 0c656d1a9c5a5..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v2/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/config/trace/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto deleted file mode 100644 index 30c276f24276b..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto +++ /dev/null @@ -1,352 +0,0 @@ -syntax = "proto3"; - -package envoy.config.bootstrap.v2; - -import "envoy/api/v2/auth/secret.proto"; -import "envoy/api/v2/cluster.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/event_service_config.proto"; -import 
"envoy/api/v2/core/socket_option.proto"; -import "envoy/api/v2/listener.proto"; -import "envoy/config/metrics/v2/stats.proto"; -import "envoy/config/overload/v2alpha/overload.proto"; -import "envoy/config/trace/v2/http_tracer.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; -option java_outer_classname = "BootstrapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v2 configuration. See the :ref:`v2 configuration overview -// ` for more detail. - -// Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] -message Bootstrap { - message StaticResources { - // Static :ref:`Listeners `. These listeners are - // available regardless of LDS configuration. - repeated api.v2.Listener listeners = 1; - - // If a network based configuration source is specified for :ref:`cds_config - // `, it's necessary - // to have some initial cluster definitions available to allow Envoy to know - // how to speak to the management server. These cluster definitions may not - // use :ref:`EDS ` (i.e. they should be static - // IP or DNS-based). - repeated api.v2.Cluster clusters = 2; - - // These static secrets can be used by :ref:`SdsSecretConfig - // ` - repeated api.v2.auth.Secret secrets = 3; - } - - message DynamicResources { - reserved 4; - - // All :ref:`Listeners ` are provided by a single - // :ref:`LDS ` configuration source. - api.v2.core.ConfigSource lds_config = 1; - - // All post-bootstrap :ref:`Cluster ` definitions are - // provided by a single :ref:`CDS ` - // configuration source. 
- api.v2.core.ConfigSource cds_config = 2; - - // A single :ref:`ADS ` source may be optionally - // specified. This must have :ref:`api_type - // ` :ref:`GRPC - // `. Only - // :ref:`ConfigSources ` that have - // the :ref:`ads ` field set will be - // streamed on the ADS channel. - api.v2.core.ApiConfigSource ads_config = 3; - } - - reserved 10; - - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - api.v2.core.Node node = 1; - - // Statically specified resources. - StaticResources static_resources = 2; - - // xDS configuration sources. - DynamicResources dynamic_resources = 3; - - // Configuration for the cluster manager which owns all upstream clusters - // within the server. - ClusterManager cluster_manager = 4; - - // Health discovery service config option. - // (:ref:`core.ApiConfigSource `) - api.v2.core.ApiConfigSource hds_config = 14; - - // Optional file system path to search for startup flag files. - string flags_path = 5; - - // Optional set of stats sinks. - repeated metrics.v2.StatsSink stats_sinks = 6; - - // Configuration for internal processing of stats. - metrics.v2.StatsConfig stats_config = 13; - - // Optional duration between flushes to configured stats sinks. For - // performance reasons Envoy latches counters and only flushes counters and - // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). - // Duration must be at least 1ms and at most 5 min. - google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt {seconds: 300} - gte {nanos: 1000000} - }]; - - // Optional watchdog configuration. - Watchdog watchdog = 8; - - // Configuration for an external tracing provider. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider - // `. - trace.v2.Tracing tracing = 9; - - // Configuration for the runtime configuration provider (deprecated). 
If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - Runtime runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Configuration for the runtime configuration provider. If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - LayeredRuntime layered_runtime = 17; - - // Configuration for the local administration HTTP server. - Admin admin = 12; - - // Optional overload manager configuration. - overload.v2alpha.OverloadManager overload_manager = 15; - - // Enable :ref:`stats for event dispatcher `, defaults to false. - // Note that this records a value for each iteration of the event loop on every thread. This - // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value - // over the wire individually because the statsd protocol doesn't have any way to represent a - // histogram summary. Be aware that this can be a very large volume of data. - bool enable_dispatcher_stats = 16; - - // Optional string which will be used in lieu of x-envoy in prefixing headers. - // - // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be - // transformed into x-foo-retry-on etc. - // - // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the - // headers Envoy will trust for core code and core extensions only. Be VERY careful making - // changes to this string, especially in multi-layer Envoy deployments or deployments using - // extensions which are not upstream. - string header_prefix = 18; - - // Optional proxy version which will be used to set the value of :ref:`server.version statistic - // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. 
- google.protobuf.UInt64Value stats_server_version_override = 19; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // This may be overridden on a per-cluster basis in cds_config, - // when :ref:`dns_resolvers ` and - // :ref:`use_tcp_for_dns_lookups ` are - // specified. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 20; -} - -// Administration interface :ref:`operations documentation -// `. -message Admin { - // The path to write the access log for the administration server. If no - // access log is desired specify ‘/dev/null’. This is only required if - // :ref:`address ` is set. - string access_log_path = 1; - - // The cpu profiler output path for the administration server. If no profile - // path is specified, the default is ‘/var/log/envoy/envoy.prof’. - string profile_path = 2; - - // The TCP address that the administration server will listen on. - // If not specified, Envoy will not start an administration server. - api.v2.core.Address address = 3; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated api.v2.core.SocketOption socket_options = 4; -} - -// Cluster manager :ref:`architecture overview `. -message ClusterManager { - message OutlierDetection { - // Specifies the path to the outlier event log. - string event_log_path = 1; - - // [#not-implemented-hide:] - // The gRPC service for the outlier detection event service. - // If empty, outlier detection events won't be sent to a remote endpoint. - api.v2.core.EventServiceConfig event_service = 2; - } - - // Name of the local cluster (i.e., the cluster that owns the Envoy running - // this configuration). In order to enable :ref:`zone aware routing - // ` this option must be set. 
- // If *local_cluster_name* is defined then :ref:`clusters - // ` must be defined in the :ref:`Bootstrap - // static cluster resources - // `. This is unrelated to - // the :option:`--service-cluster` option which does not `affect zone aware - // routing `_. - string local_cluster_name = 1; - - // Optional global configuration for outlier detection. - OutlierDetection outlier_detection = 2; - - // Optional configuration used to bind newly established upstream connections. - // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - api.v2.core.BindConfig upstream_bind_config = 3; - - // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type - // ` :ref:`GRPC - // `. - api.v2.core.ApiConfigSource load_stats_config = 4; -} - -// Envoy process watchdog configuration. When configured, this monitors for -// nonresponsive threads and kills the process after the configured thresholds. -// See the :ref:`watchdog documentation ` for more information. -message Watchdog { - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. - google.protobuf.Duration miss_timeout = 1; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is - // 1000ms. - google.protobuf.Duration megamiss_timeout = 2; - - // If a watched thread has been nonresponsive for this duration, assume a - // programming error and kill the entire Envoy process. Set to 0 to disable - // kill behavior. If not specified the default is 0 (disabled). - google.protobuf.Duration kill_timeout = 3; - - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). 
- google.protobuf.Duration multikill_timeout = 4; -} - -// Runtime :ref:`configuration overview ` (deprecated). -message Runtime { - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. Envoy - // will watch the location for changes and reload the file system tree when - // they happen. If this parameter is not set, there will be no disk based - // runtime. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 2; - - // Specifies an optional subdirectory to load within the root directory. If - // specified and the directory exists, configuration values within this - // directory will override those found in the primary subdirectory. This is - // useful when Envoy is deployed across many different types of servers. - // Sometimes it is useful to have a per service cluster directory for runtime - // configuration. See below for exactly how the override directory is used. - string override_subdirectory = 3; - - // Static base runtime. This will be :ref:`overridden - // ` by other runtime layers, e.g. - // disk or admin. This follows the :ref:`runtime protobuf JSON representation - // encoding `. - google.protobuf.Struct base = 4; -} - -// [#next-free-field: 6] -message RuntimeLayer { - // :ref:`Disk runtime ` layer. - message DiskLayer { - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. - // Envoy will watch the location for changes and reload the file system tree - // when they happen. 
See documentation on runtime :ref:`atomicity - // ` for further details on how reloads are - // treated. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 3; - - // :ref:`Append ` the - // service cluster to the path under symlink root. - bool append_service_cluster = 2; - } - - // :ref:`Admin console runtime ` layer. - message AdminLayer { - } - - // :ref:`Runtime Discovery Service (RTDS) ` layer. - message RtdsLayer { - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; - - // RTDS configuration source. - api.v2.core.ConfigSource rtds_config = 2; - } - - // Descriptive name for the runtime layer. This is only used for the runtime - // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof layer_specifier { - option (validate.required) = true; - - // :ref:`Static runtime ` layer. - // This follows the :ref:`runtime protobuf JSON representation encoding - // `. Unlike static xDS resources, this static - // layer is overridable by later layers in the runtime virtual filesystem. - google.protobuf.Struct static_layer = 2; - - DiskLayer disk_layer = 3; - - AdminLayer admin_layer = 4; - - RtdsLayer rtds_layer = 5; - } -} - -// Runtime :ref:`configuration overview `. -message LayeredRuntime { - // The :ref:`layers ` of the runtime. This is ordered - // such that later layers in the list overlay earlier entries. - repeated RuntimeLayer layers = 1; -} diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD deleted file mode 100644 index 48e8fb522c993..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", - "//envoy/config/overload/v3:pkg", - "//envoy/config/trace/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto deleted file mode 100644 index 9171d066a4302..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ /dev/null @@ -1,648 +0,0 @@ -syntax = "proto3"; - -package envoy.config.bootstrap.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/cluster/v3/cluster.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/event_service_config.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/config/core/v3/socket_option.proto"; -import "envoy/config/listener/v3/listener.proto"; -import "envoy/config/metrics/v3/stats.proto"; -import "envoy/config/overload/v3/overload.proto"; -import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import 
"udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.bootstrap.v3"; -option java_outer_classname = "BootstrapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v3 configuration. See the :ref:`v3 configuration overview -// ` for more detail. - -// Bootstrap :ref:`configuration overview `. -// [#next-free-field: 33] -message Bootstrap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.Bootstrap"; - - message StaticResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.Bootstrap.StaticResources"; - - // Static :ref:`Listeners `. These listeners are - // available regardless of LDS configuration. - repeated listener.v3.Listener listeners = 1; - - // If a network based configuration source is specified for :ref:`cds_config - // `, it's necessary - // to have some initial cluster definitions available to allow Envoy to know - // how to speak to the management server. These cluster definitions may not - // use :ref:`EDS ` (i.e. they should be static - // IP or DNS-based). - repeated cluster.v3.Cluster clusters = 2; - - // These static secrets can be used by :ref:`SdsSecretConfig - // ` - repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; - } - - // [#next-free-field: 7] - message DynamicResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; - - reserved 4; - - // All :ref:`Listeners ` are provided by a single - // :ref:`LDS ` configuration source. 
- core.v3.ConfigSource lds_config = 1; - - // xdstp:// resource locator for listener collection. - // [#not-implemented-hide:] - string lds_resources_locator = 5; - - // All post-bootstrap :ref:`Cluster ` definitions are - // provided by a single :ref:`CDS ` - // configuration source. - core.v3.ConfigSource cds_config = 2; - - // xdstp:// resource locator for cluster collection. - // [#not-implemented-hide:] - string cds_resources_locator = 6; - - // A single :ref:`ADS ` source may be optionally - // specified. This must have :ref:`api_type - // ` :ref:`GRPC - // `. Only - // :ref:`ConfigSources ` that have - // the :ref:`ads ` field set will be - // streamed on the ADS channel. - core.v3.ApiConfigSource ads_config = 3; - } - - reserved 10; - - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - core.v3.Node node = 1; - - // A list of :ref:`Node ` field names - // that will be included in the context parameters of the effective - // xdstp:// URL that is sent in a discovery request when resource - // locators are used for LDS/CDS. Any non-string field will have its JSON - // encoding set as the context parameter value, with the exception of - // metadata, which will be flattened (see example below). The supported field - // names are: - // - "cluster" - // - "id" - // - "locality.region" - // - "locality.sub_zone" - // - "locality.zone" - // - "metadata" - // - "user_agent_build_version.metadata" - // - "user_agent_build_version.version" - // - "user_agent_name" - // - "user_agent_version" - // - // The node context parameters act as a base layer dictionary for the context - // parameters (i.e. more specific resource specific context parameters will - // override). Field names will be prefixed with “udpa.node.” when included in - // context parameters. 
- // - // For example, if node_context_params is ``["user_agent_name", "metadata"]``, - // the implied context parameters might be:: - // - // node.user_agent_name: "envoy" - // node.metadata.foo: "{\"bar\": \"baz\"}" - // node.metadata.some: "42" - // node.metadata.thing: "\"thing\"" - // - // [#not-implemented-hide:] - repeated string node_context_params = 26; - - // Statically specified resources. - StaticResources static_resources = 2; - - // xDS configuration sources. - DynamicResources dynamic_resources = 3; - - // Configuration for the cluster manager which owns all upstream clusters - // within the server. - ClusterManager cluster_manager = 4; - - // Health discovery service config option. - // (:ref:`core.ApiConfigSource `) - core.v3.ApiConfigSource hds_config = 14; - - // Optional file system path to search for startup flag files. - string flags_path = 5; - - // Optional set of stats sinks. - repeated metrics.v3.StatsSink stats_sinks = 6; - - // Configuration for internal processing of stats. - metrics.v3.StatsConfig stats_config = 13; - - // Optional duration between flushes to configured stats sinks. For - // performance reasons Envoy latches counters and only flushes counters and - // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). Only one of `stats_flush_interval` or `stats_flush_on_admin` - // can be set. - // Duration must be at least 1ms and at most 5 min. - google.protobuf.Duration stats_flush_interval = 7 [ - (validate.rules).duration = { - lt {seconds: 300} - gte {nanos: 1000000} - }, - (udpa.annotations.field_migrate).oneof_promotion = "stats_flush" - ]; - - oneof stats_flush { - // Flush stats to sinks only when queried for on the admin interface. If set, - // a flush timer is not created. Only one of `stats_flush_on_admin` or - // `stats_flush_interval` can be set. - bool stats_flush_on_admin = 29 [(validate.rules).bool = {const: true}]; - } - - // Optional watchdog configuration. 
- // This is for a single watchdog configuration for the entire system. - // Deprecated in favor of *watchdogs* which has finer granularity. - Watchdog watchdog = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional watchdogs configuration. - // This is used for specifying different watchdogs for the different subsystems. - // [#extension-category: envoy.guarddog_actions] - Watchdogs watchdogs = 27; - - // Configuration for an external tracing provider. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider - // `. - trace.v3.Tracing tracing = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Configuration for the runtime configuration provider. If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - LayeredRuntime layered_runtime = 17; - - // Configuration for the local administration HTTP server. - Admin admin = 12; - - // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15 [ - (udpa.annotations.security).configure_for_untrusted_downstream = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true - ]; - - // Enable :ref:`stats for event dispatcher `, defaults to false. - // Note that this records a value for each iteration of the event loop on every thread. This - // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value - // over the wire individually because the statsd protocol doesn't have any way to represent a - // histogram summary. Be aware that this can be a very large volume of data. - bool enable_dispatcher_stats = 16; - - // Optional string which will be used in lieu of x-envoy in prefixing headers. - // - // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be - // transformed into x-foo-retry-on etc. 
- // - // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the - // headers Envoy will trust for core code and core extensions only. Be VERY careful making - // changes to this string, especially in multi-layer Envoy deployments or deployments using - // extensions which are not upstream. - string header_prefix = 18; - - // Optional proxy version which will be used to set the value of :ref:`server.version statistic - // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. - google.protobuf.UInt64Value stats_server_version_override = 19; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // This may be overridden on a per-cluster basis in cds_config, - // when :ref:`dns_resolvers ` and - // :ref:`use_tcp_for_dns_lookups ` are - // specified. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - bool use_tcp_for_dns_lookups = 20 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // This may be overridden on a per-cluster basis in cds_config, when - // :ref:`dns_resolution_config ` - // is specified. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v3.DnsResolutionConfig dns_resolution_config = 30; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. 
- // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v3.TypedExtensionConfig typed_dns_resolver_config = 31; - - // Specifies optional bootstrap extensions to be instantiated at startup time. - // Each item contains extension specific configuration. - // [#extension-category: envoy.bootstrap] - repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; - - // Specifies optional extensions instantiated at startup time and - // invoked during crash time on the request that caused the crash. - repeated FatalAction fatal_actions = 28; - - // Configuration sources that will participate in - // xdstp:// URL authority resolution. The algorithm is as - // follows: - // 1. The authority field is taken from the xdstp:// URL, call - // this *resource_authority*. - // 2. *resource_authority* is compared against the authorities in any peer - // *ConfigSource*. The peer *ConfigSource* is the configuration source - // message which would have been used unconditionally for resolution - // with opaque resource names. If there is a match with an authority, the - // peer *ConfigSource* message is used. - // 3. *resource_authority* is compared sequentially with the authorities in - // each configuration source in *config_sources*. The first *ConfigSource* - // to match wins. - // 4. 
As a fallback, if no configuration source matches, then - // *default_config_source* is used. - // 5. If *default_config_source* is not specified, resolution fails. - // [#not-implemented-hide:] - repeated core.v3.ConfigSource config_sources = 22; - - // Default configuration source for xdstp:// URLs if all - // other resolution fails. - // [#not-implemented-hide:] - core.v3.ConfigSource default_config_source = 23; - - // Optional overriding of default socket interface. The value must be the name of one of the - // socket interface factories initialized through a bootstrap extension - string default_socket_interface = 24; - - // Global map of CertificateProvider instances. These instances are referred to by name in the - // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name - // ` - // field. - // [#not-implemented-hide:] - map certificate_provider_instances = 25; - - // Specifies a set of headers that need to be registered as inline header. This configuration - // allows users to customize the inline headers on-demand at Envoy startup without modifying - // Envoy's source code. - // - // Note that the 'set-cookie' header cannot be registered as inline header. - repeated CustomInlineHeader inline_headers = 32; - - Runtime hidden_envoy_deprecated_runtime = 11 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Administration interface :ref:`operations documentation -// `. -// [#next-free-field: 6] -message Admin { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Admin"; - - // Configuration for :ref:`access logs ` - // emitted by the administration server. - repeated accesslog.v3.AccessLog access_log = 5; - - // The path to write the access log for the administration server. If no - // access log is desired specify ‘/dev/null’. This is only required if - // :ref:`address ` is set. 
- // Deprecated in favor of *access_log* which offers more options. - string access_log_path = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The cpu profiler output path for the administration server. If no profile - // path is specified, the default is ‘/var/log/envoy/envoy.prof’. - string profile_path = 2; - - // The TCP address that the administration server will listen on. - // If not specified, Envoy will not start an administration server. - core.v3.Address address = 3; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v3.SocketOption socket_options = 4; -} - -// Cluster manager :ref:`architecture overview `. -message ClusterManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.ClusterManager"; - - message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.ClusterManager.OutlierDetection"; - - // Specifies the path to the outlier event log. - string event_log_path = 1; - - // [#not-implemented-hide:] - // The gRPC service for the outlier detection event service. - // If empty, outlier detection events won't be sent to a remote endpoint. - core.v3.EventServiceConfig event_service = 2; - } - - // Name of the local cluster (i.e., the cluster that owns the Envoy running - // this configuration). In order to enable :ref:`zone aware routing - // ` this option must be set. - // If *local_cluster_name* is defined then :ref:`clusters - // ` must be defined in the :ref:`Bootstrap - // static cluster resources - // `. This is unrelated to - // the :option:`--service-cluster` option which does not `affect zone aware - // routing `_. - string local_cluster_name = 1; - - // Optional global configuration for outlier detection. - OutlierDetection outlier_detection = 2; - - // Optional configuration used to bind newly established upstream connections. 
- // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - core.v3.BindConfig upstream_bind_config = 3; - - // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type - // ` :ref:`GRPC - // `. - core.v3.ApiConfigSource load_stats_config = 4; -} - -// Allows you to specify different watchdog configs for different subsystems. -// This allows finer tuned policies for the watchdog. If a subsystem is omitted -// the default values for that system will be used. -message Watchdogs { - // Watchdog for the main thread. - Watchdog main_thread_watchdog = 1; - - // Watchdog for the worker threads. - Watchdog worker_watchdog = 2; -} - -// Envoy process watchdog configuration. When configured, this monitors for -// nonresponsive threads and kills the process after the configured thresholds. -// See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 8] -message Watchdog { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; - - message WatchdogAction { - // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. - // Within an event type, actions execute in the order they are configured. - // For KILL/MULTIKILL there is a default PANIC that will run after the - // registered actions and kills the process if it wasn't already killed. - // It might be useful to specify several debug actions, and possibly an - // alternate FATAL action. - enum WatchdogEvent { - UNKNOWN = 0; - KILL = 1; - MULTIKILL = 2; - MEGAMISS = 3; - MISS = 4; - } - - // Extension specific configuration for the action. - core.v3.TypedExtensionConfig config = 1; - - WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // Register actions that will fire on given WatchDog events. - // See *WatchDogAction* for priority of events. 
- repeated WatchdogAction actions = 7; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. - google.protobuf.Duration miss_timeout = 1; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is - // 1000ms. - google.protobuf.Duration megamiss_timeout = 2; - - // If a watched thread has been nonresponsive for this duration, assume a - // programming error and kill the entire Envoy process. Set to 0 to disable - // kill behavior. If not specified the default is 0 (disabled). - google.protobuf.Duration kill_timeout = 3; - - // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is - // enabled. Enabling this feature would help to reduce risk of synchronized - // watchdog kill events across proxies due to external triggers. Set to 0 to - // disable. If not specified the default is 0 (disabled). - google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; - - // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) - // threads have been nonresponsive for at least this duration kill the entire - // Envoy process. Set to 0 to disable this behavior. If not specified the - // default is 0 (disabled). - google.protobuf.Duration multikill_timeout = 4; - - // Sets the threshold for *multikill_timeout* in terms of the percentage of - // nonresponsive threads required for the *multikill_timeout*. - // If not specified the default is 0. - type.v3.Percent multikill_threshold = 5; -} - -// Fatal actions to run while crashing. Actions can be safe (meaning they are -// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions. -// If using an unsafe action that could get stuck or deadlock, it important to -// have an out of band system to terminate the process. 
-// -// The interface for the extension is ``Envoy::Server::Configuration::FatalAction``. -// *FatalAction* extensions live in the ``envoy.extensions.fatal_actions`` API -// namespace. -message FatalAction { - // Extension specific configuration for the action. It's expected to conform - // to the ``Envoy::Server::Configuration::FatalAction`` interface. - core.v3.TypedExtensionConfig config = 1; -} - -// Runtime :ref:`configuration overview ` (deprecated). -message Runtime { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Runtime"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. Envoy - // will watch the location for changes and reload the file system tree when - // they happen. If this parameter is not set, there will be no disk based - // runtime. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 2; - - // Specifies an optional subdirectory to load within the root directory. If - // specified and the directory exists, configuration values within this - // directory will override those found in the primary subdirectory. This is - // useful when Envoy is deployed across many different types of servers. - // Sometimes it is useful to have a per service cluster directory for runtime - // configuration. See below for exactly how the override directory is used. - string override_subdirectory = 3; - - // Static base runtime. This will be :ref:`overridden - // ` by other runtime layers, e.g. - // disk or admin. This follows the :ref:`runtime protobuf JSON representation - // encoding `. 
- google.protobuf.Struct base = 4; -} - -// [#next-free-field: 6] -message RuntimeLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer"; - - // :ref:`Disk runtime ` layer. - message DiskLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer.DiskLayer"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. - // Envoy will watch the location for changes and reload the file system tree - // when they happen. See documentation on runtime :ref:`atomicity - // ` for further details on how reloads are - // treated. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 3; - - // :ref:`Append ` the - // service cluster to the path under symlink root. - bool append_service_cluster = 2; - } - - // :ref:`Admin console runtime ` layer. - message AdminLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer.AdminLayer"; - } - - // :ref:`Runtime Discovery Service (RTDS) ` layer. - message RtdsLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; - - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; - - // RTDS configuration source. - core.v3.ConfigSource rtds_config = 2; - } - - // Descriptive name for the runtime layer. This is only used for the runtime - // :http:get:`/runtime` output. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof layer_specifier { - option (validate.required) = true; - - // :ref:`Static runtime ` layer. - // This follows the :ref:`runtime protobuf JSON representation encoding - // `. Unlike static xDS resources, this static - // layer is overridable by later layers in the runtime virtual filesystem. - google.protobuf.Struct static_layer = 2; - - DiskLayer disk_layer = 3; - - AdminLayer admin_layer = 4; - - RtdsLayer rtds_layer = 5; - } -} - -// Runtime :ref:`configuration overview `. -message LayeredRuntime { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.LayeredRuntime"; - - // The :ref:`layers ` of the runtime. This is ordered - // such that later layers in the list overlay earlier entries. - repeated RuntimeLayer layers = 1; -} - -// Used to specify the header that needs to be registered as an inline header. -// -// If request or response contain multiple headers with the same name and the header -// name is registered as an inline header. Then multiple headers will be folded -// into one, and multiple header values will be concatenated by a suitable delimiter. -// The delimiter is generally a comma. -// -// For example, if 'foo' is registered as an inline header, and the headers contains -// the following two headers: -// -// .. code-block:: text -// -// foo: bar -// foo: eep -// -// Then they will eventually be folded into: -// -// .. code-block:: text -// -// foo: bar, eep -// -// Inline headers provide O(1) search performance, but each inline header imposes -// an additional memory overhead on all instances of the corresponding type of -// HeaderMap or TrailerMap. -message CustomInlineHeader { - enum InlineHeaderType { - REQUEST_HEADER = 0; - REQUEST_TRAILER = 1; - RESPONSE_HEADER = 2; - RESPONSE_TRAILER = 3; - } - - // The name of the header that is expected to be set as the inline header. 
- string inline_header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The type of the header that is expected to be set as the inline header. - InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD b/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto deleted file mode 100644 index a0fdadd75724c..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.aggregate.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v2alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.aggregate.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Aggregate cluster configuration] - -// Configuration for the aggregate cluster. See the :ref:`architecture overview -// ` for more information. 
-// [#extension: envoy.clusters.aggregate] -message ClusterConfig { - // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they - // appear in this list. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD deleted file mode 100644 index 25c228fd56093..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto deleted file mode 100644 index 33f5ffe057e3a..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.dynamic_forward_proxy.v2alpha; - -import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.clusters.dynamic_forward_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamic 
forward proxy cluster configuration] - -// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.dynamic_forward_proxy] -message ClusterConfig { - // The DNS cache configuration that the cluster will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy HTTP filter configuration - // `. - common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/redis/BUILD b/generated_api_shadow/envoy/config/cluster/redis/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/redis/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto deleted file mode 100644 index abe88f76a6ff8..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.redis; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.redis"; -option java_outer_classname = "RedisClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Redis Cluster Configuration] -// This cluster adds support for `Redis Cluster `_, as part -// of :ref:`Envoy's 
support for Redis Cluster `. -// -// Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its primary fails over to a replica, and designates it as the new primary). -// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client -// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the -// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS -// command `_. This result is then stored locally, and -// updated at user-configured intervals. -// -// Additionally, if -// :ref:`enable_redirection` -// is true, then moved and ask redirection errors from upstream servers will trigger a topology -// refresh when they exceed a user-configured error threshold. -// -// Example: -// -// .. code-block:: yaml -// -// name: name -// connect_timeout: 0.25s -// dns_lookup_family: V4_ONLY -// hosts: -// - socket_address: -// address: foo.bar.com -// port_value: 22120 -// cluster_type: -// name: envoy.clusters.redis -// typed_config: -// "@type": type.googleapis.com/google.protobuf.Struct -// value: -// cluster_refresh_rate: 30s -// cluster_refresh_timeout: 0.5s -// redirect_refresh_interval: 10s -// redirect_refresh_threshold: 10 -// [#extension: envoy.clusters.redis] - -// [#next-free-field: 7] -message RedisClusterConfig { - // Interval between successive topology refresh requests. If not set, this defaults to 5s. - google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; - - // Timeout for topology refresh request. If not set, this defaults to 3s. - google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; - - // The minimum interval that must pass after triggering a topology refresh request before a new - // request can possibly be triggered again. Any errors received during one of these - // time intervals are ignored. 
If not set, this defaults to 5s. - google.protobuf.Duration redirect_refresh_interval = 3; - - // The number of redirection errors that must be received before - // triggering a topology refresh request. If not set, this defaults to 5. - // If this is set to 0, topology refresh after redirect is disabled. - google.protobuf.UInt32Value redirect_refresh_threshold = 4; - - // The number of failures that must be received before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to failure. - uint32 failure_refresh_threshold = 5; - - // The number of hosts became degraded or unhealthy before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to degraded or - // unhealthy host. - uint32 host_degraded_refresh_threshold = 6; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/BUILD b/generated_api_shadow/envoy/config/cluster/v3/BUILD deleted file mode 100644 index 53f05bbbd9eba..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto b/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto deleted file mode 100644 index 82cd329b91a72..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "CircuitBreakerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Circuit breakers] - -// :ref:`Circuit breaking` settings can be -// specified individually for each defined priority. -message CircuitBreakers { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.CircuitBreakers"; - - // A Thresholds defines CircuitBreaker settings for a - // :ref:`RoutingPriority`. 
- // [#next-free-field: 9] - message Thresholds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.CircuitBreakers.Thresholds"; - - message RetryBudget { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget"; - - // Specifies the limit on concurrent retries as a percentage of the sum of active requests and - // active pending requests. For example, if there are 100 active requests and the - // budget_percent is set to 25, there may be 25 active retries. - // - // This parameter is optional. Defaults to 20%. - type.v3.Percent budget_percent = 1; - - // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the - // number of active retries may never go below this number. - // - // This parameter is optional. Defaults to 3. - google.protobuf.UInt32Value min_retry_concurrency = 2; - } - - // The :ref:`RoutingPriority` - // the specified CircuitBreaker settings apply to. - core.v3.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; - - // The maximum number of connections that Envoy will make to the upstream - // cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_connections = 2; - - // The maximum number of pending requests that Envoy will allow to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 3; - - // The maximum number of parallel requests that Envoy will make to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_requests = 4; - - // The maximum number of parallel retries that Envoy will allow to the - // upstream cluster. If not specified, the default is 3. - google.protobuf.UInt32Value max_retries = 5; - - // Specifies a limit on concurrent retries in relation to the number of active requests. This - // parameter is optional. - // - // .. 
note:: - // - // If this field is set, the retry budget will override any configured retry circuit - // breaker. - RetryBudget retry_budget = 8; - - // If track_remaining is true, then stats will be published that expose - // the number of resources remaining until the circuit breakers open. If - // not specified, the default is false. - // - // .. note:: - // - // If a retry budget is used in lieu of the max_retries circuit breaker, - // the remaining retry resources remaining will not be tracked. - bool track_remaining = 6; - - // The maximum number of connection pools per cluster that Envoy will concurrently support at - // once. If not specified, the default is unlimited. Set this for clusters which create a - // large number of connection pools. See - // :ref:`Circuit Breaking ` for - // more details. - google.protobuf.UInt32Value max_connection_pools = 7; - } - - // If multiple :ref:`Thresholds` - // are defined with the same :ref:`RoutingPriority`, - // the first one in the list is used. If no Thresholds is defined for a given - // :ref:`RoutingPriority`, the default values - // are used. 
- repeated Thresholds thresholds = 1; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto deleted file mode 100644 index 2e40700c3ace7..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ /dev/null @@ -1,1163 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "envoy/config/cluster/v3/circuit_breaker.proto"; -import "envoy/config/cluster/v3/filter.proto"; -import "envoy/config/cluster/v3/outlier_detection.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/health_check.proto"; -import "envoy/config/core/v3/protocol.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/config/endpoint/v3/endpoint.proto"; -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Cluster configuration] - -// Cluster list collections. Entries are *Cluster* resources or references. 
-// [#not-implemented-hide:] -message ClusterCollection { - xds.core.v3.CollectionEntry entries = 1; -} - -// Configuration for a single upstream cluster. -// [#next-free-field: 56] -message Cluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; - - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. - LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. - enum LbPolicy { - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. - CLUSTER_PROVIDED = 6; - - // Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. 
- // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - - hidden_envoy_deprecated_ORIGINAL_DST_LB = 4 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. - enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.TransportSocketMatch"; - - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. 
- // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - // against the values specified in this field. - google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - // [#extension-category: envoy.transport_sockets.upstream] - core.v3.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CustomClusterType"; - - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - // [#extension-category: envoy.clusters] - google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.EdsClusterConfig"; - - // Configuration for the source of EDS updates for this Cluster. - core.v3.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. This may be a xdstp:// URL. - string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - // [#next-free-field: 8] - message LbSubsetConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.LbSubsetConfig"; - - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. 
If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector"; - - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - - // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - // keys reduced to - // :ref:`fallback_keys_subset`. - // It allows for a fallback to a different, less specific selector if some of the keys of - // the selector are considered optional. - KEYS_SUBSET = 4; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for - // choosing a host, but updating hosts is faster, especially for large numbers of hosts. - // - // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy. - // - // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains - // only one entry. 
- // - // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys` - // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge - // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are - // present in the current configuration. - bool single_host_per_subset = 4; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Subset of - // :ref:`keys` used by - // :ref:`KEYS_SUBSET` - // fallback policy. - // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - // For any other fallback policy the parameter is not used and should not be set. - // Only values also present in - // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. - repeated string fallback_keys_subset = 3; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. 
code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. - repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionately affected by the - // subset predicate. - bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. 
- bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. - message LeastRequestLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.LeastRequestLbConfig"; - - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - - // The following formula is used to calculate the dynamic weights when hosts have different load - // balancing weights: - // - // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` - // - // The larger the active request bias is, the more aggressively active requests will lower the - // effective weight when all host weights are not equal. - // - // `active_request_bias` must be greater than or equal to 0.0. - // - // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number - // of active requests at the time it picks a host and behaves like the Round Robin Load - // Balancer. - // - // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing - // weight by the number of active requests at the time it does a pick. - // - // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's - // host sets changes, e.g., whenever there is a host membership update or a host load balancing - // weight change. - // - // .. note:: - // This setting only takes effect if all host weights are not equal. 
- core.v3.RuntimeDouble active_request_bias = 2; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.RingHashLbConfig"; - - // The hash function used to hash hosts onto the ketama ring. - enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the :ref:`Maglev` - // load balancing policy. - message MaglevLbConfig { - // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. - // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same - // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number limited to 5000011. 
If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.OriginalDstLbConfig"; - - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - // [#next-free-field: 8] - message CommonLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig"; - - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig"; - - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.v3.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. 
- google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. - bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; - } - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - message ConsistentHashingLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; - - // If set to `true`, the cluster will use hostname instead of the resolved - // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - bool use_hostname_for_hashing = 1; - - // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 - // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. - // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. - // Minimum is 100. - // - // Applies to both Ring Hash and Maglev load balancers. - // - // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified - // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests - // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing - // is used to identify an eligible host. 
Further, the linear probe is implemented using a random jump in hosts ring/table to identify - // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the - // cascading overflow effect when choosing the next host in the ring/table). - // - // If weights are specified on the hosts, they are respected. - // - // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts - // being probed, so use a higher value if you require better performance. - google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}]; - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.v3.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. - // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. 
See - // https://github.com/envoyproxy/envoy/pull/3941. - google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will :ref:`exclude ` new hosts - // when computing load balancing weights until they have been health checked for the first time. - // This will have no effect unless active health checking is also configured. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - } - - message RefreshRate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.RefreshRate"; - - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - message PreconnectPolicy { - // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting - // will only be done if the upstream is healthy and the cluster has traffic. - // - // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be - // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For - // HTTP/2, only one connection would be established by default as one connection can - // serve both the original and presumed follow-up stream. - // - // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections preconnected. - // This might be a useful value for something like short lived single-use connections, - // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection - // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP - // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue - // in case of unexpected disconnects where the connection could not be reused. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight. This means in steady state if a connection is torn down, - // a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. - // - // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can - // harm latency more than the preconnecting helps. - google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - - // Indicates how many many streams (rounded up) can be anticipated across a cluster for each - // stream, useful for low QPS services. This is currently supported for a subset of - // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike *per_upstream_preconnect_ratio* this preconnects across the upstream instances in a - // cluster, doing best effort predictions of what upstream would be picked next and - // pre-establishing a connection. 
- // - // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will - // only be done if there are healthy upstreams and the cluster has traffic. - // - // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be preconnected - one to the first upstream for this - // cluster, one to the second on the assumption there will be a follow-up stream. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for - // connection establishment. - // - // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, - // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each - // upstream. - google.protobuf.DoubleValue predictive_preconnect_ratio = 2 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - } - - reserved 12, 15; - - // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket_match* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... 
} # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: envoy.transport_sockets.raw_buffer - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. - // - // This field can be used to specify custom transport socket configurations for health - // checks by adding matching key/value pairs in a health check's - // :ref:`transport socket match criteria ` field. - // - // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. 
- // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional alternative to the cluster name to be used for observability. This name is used - // emitting stats for the cluster and access logging the cluster name. This will appear as - // additional information in configuration dumps of a cluster's current status as - // :ref:`observability_name ` - // and as an additional tag "upstream_cluster.name" while tracing. Note: access logging using - // this field is presently enabled with runtime feature - // `envoy.reloadable_features.use_observable_cluster_name`. Any ``:`` in the name will be - // converted to ``_`` when emitting statistics. This should not be confused with :ref:`Router - // Filter Header `. - string alt_stat_name = 28 [(udpa.annotations.field_migrate).rename = "observability_name"]; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - // If not set, a default value of 5s will be used. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_upstream = true]; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. 
- LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // - endpoint.v3.ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.v3.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - // - // .. attention:: - // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. - google.protobuf.UInt32Value max_requests_per_connection = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional :ref:`circuit breaking ` for the cluster. - CircuitBreakers circuit_breakers = 10; - - // HTTP protocol options that are applied only to upstream HTTP connections. - // These options apply to all HTTP versions. - // This has been deprecated in favor of - // :ref:`upstream_http_protocol_options ` - // in the :ref:`http_protocol_options ` message. - // upstream_http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. 
- core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Additional options when handling HTTP requests upstream. These options will be applicable to - // both HTTP1 and HTTP2 requests. - // This has been deprecated in favor of - // :ref:`common_http_protocol_options ` - // in the :ref:`http_protocol_options ` message. - // common_http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v3.HttpProtocolOptions common_http_protocol_options = 29 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Additional options when handling HTTP1 requests. - // This has been deprecated in favor of http_protocol_options fields in the - // :ref:`http_protocol_options ` message. - // http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v3.Http1ProtocolOptions http_protocol_options = 13 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - // This has been deprecated in favor of http2_protocol_options fields in the - // :ref:`http_protocol_options ` - // message. http2_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. 
- core.v3.Http2ProtocolOptions http2_protocol_options = 14 [ - deprecated = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - // [#next-major-version: make this a list of typed extensions.] - map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. The value configured must be at least 1ms. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. 
- DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - repeated core.v3.Address dns_resolvers = 18 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - bool use_tcp_for_dns_lookups = 45 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v3.DnsResolutionConfig dns_resolution_config = 53; - - // DNS resolver type configuration extension. 
This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v3.TypedExtensionConfig typed_dns_resolver_config = 55; - - // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`. - // If true, cluster readiness blocks on warm-up. If false, the cluster will complete - // initialization whether or not warm-up has completed. Defaults to true. - google.protobuf.BoolValue wait_for_warm_on_init = 54; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. 
Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.v3.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH`, - // :ref:`MAGLEV` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Maglev load balancing policy. - MaglevLbConfig maglev_lb_config = 52; - - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. 
- // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.v3.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.v3.Metadata metadata = 25; - - // Determines how Envoy selects the protocol used to speak to upstream hosts. - // This has been deprecated in favor of setting explicit protocol selection - // in the :ref:`http_protocol_options - // ` message. - // http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - ClusterProtocolSelection protocol_selection = 26 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional options for upstream connections. - UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. 
- bool close_connections_on_host_health_failure = 31; - - // If set to true, Envoy will ignore the health value of a host when processing its removal - // from service discovery. This means that if active health checking is used, Envoy will *not* - // wait for the endpoint to go unhealthy before removing it. - bool ignore_health_on_host_removal = 32; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated Filter filters = 40; - - // New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.v3.ConfigSource lrs_server = 42; - - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. 
A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - // - // .. attention:: - // - // This field has been deprecated in favor of `timeout_budgets`, part of - // :ref:`track_cluster_stats `. - bool track_timeout_budgets = 47 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional customization and configuration of upstream connection pool, and upstream type. - // - // Currently this field only applies for HTTP traffic but is designed for eventual use for custom - // TCP upstreams. - // - // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream - // HTTP, using the http connection pool and the codec from `http2_protocol_options` - // - // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT - // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. - // - // The default pool used is the generic connection pool which creates the HTTP upstream for most - // HTTP requests, and the TCP upstream if CONNECT termination is configured. - // - // If users desire custom connection pool or upstream behavior, for example terminating - // CONNECT only if a custom filter indicates it is appropriate, the custom factories - // can be registered and configured here. - // [#extension-category: envoy.upstreams] - core.v3.TypedExtensionConfig upstream_config = 48; - - // Configuration to track optional cluster stats. - TrackClusterStats track_cluster_stats = 49; - - // Preconnect configuration for this cluster. 
- PreconnectPolicy preconnect_policy = 50; - - // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate - // connection pool for every downstream connection - bool connection_pool_per_downstream_connection = 51; - - repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = - 11 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - map hidden_envoy_deprecated_extension_protocol_options = 35 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. -// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. -// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. 
-// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. -message LoadBalancingPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy"; - - message Policy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.LoadBalancingPolicy.Policy"; - - reserved 1, 3; - - reserved "name", "typed_config"; - - core.v3.TypedExtensionConfig typed_extension_config = 4; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.UpstreamBindConfig"; - - // The address Envoy should bind to when establishing upstream connections. - core.v3.Address source_address = 1; -} - -message UpstreamConnectionOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.UpstreamConnectionOptions"; - - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.v3.TcpKeepalive tcp_keepalive = 1; -} - -message TrackClusterStats { - // If timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. 
- bool timeout_budgets = 1; - - // If request_response_sizes is true, then the :ref:`histograms - // ` tracking header and body sizes - // of requests and responses will be published. - bool request_response_sizes = 2; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/filter.proto b/generated_api_shadow/envoy/config/cluster/v3/filter.proto deleted file mode 100644 index 7d11b87bcd5d5..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/filter.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "FilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Upstream filters] -// Upstream filters apply to the connections to the upstream cluster hosts. - -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter"; - - // The name of the filter to instantiate. The name must match a - // supported upstream filter. Note that Envoy's :ref:`downstream network - // filters ` are not valid upstream filters. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any typed_config = 2; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto deleted file mode 100644 index b19e95db99b74..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Outlier detection] - -// See the :ref:`architecture overview ` for -// more information on outlier detection. -// [#next-free-field: 22] -message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.OutlierDetection"; - - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_5xx = 1; - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as hosts being returned to service. Defaults - // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; - - // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected and is - // capped by :ref:`max_ejection_time`. - // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive 5xx. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; - - // The number of hosts in a cluster that must have enough request volume to - // detect success rate outliers. If the number of hosts is less than this - // setting, outlier detection via success rate statistics is not performed - // for any host in the cluster. Defaults to 5. - google.protobuf.UInt32Value success_rate_minimum_hosts = 7; - - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this host - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that host. Defaults to 100. - google.protobuf.UInt32Value success_rate_request_volume = 8; - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. 
The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - google.protobuf.UInt32Value success_rate_stdev_factor = 9; - - // The number of consecutive gateway failures (502, 503, 504 status codes) - // before a consecutive gateway failure ejection occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_gateway_failure = 10; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive gateway failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32 = {lte: 100}]; - - // Determines whether to distinguish local origin failures from external errors. If set to true - // the following configuration parameters are taken into account: - // :ref:`consecutive_local_origin_failure`, - // :ref:`enforcing_consecutive_local_origin_failure` - // and - // :ref:`enforcing_local_origin_success_rate`. - // Defaults to false. - bool split_external_local_origin_errors = 12; - - // The number of consecutive locally originated failures before ejection - // occurs. Defaults to 5. Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value consecutive_local_origin_failure = 13; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive locally originated failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 100. 
- // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics for locally originated errors. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32 = {lte: 100}]; - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given host is greater than or equal to this value, it will be - // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - // - // [#next-major-version: setting this without setting failure_percentage_threshold should be - // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // local-origin failure percentage statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32 = {lte: 100}]; - - // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
- // If the total number of hosts in the cluster is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; - - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this host. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - google.protobuf.UInt32Value failure_percentage_request_volume = 20; - - // The maximum time that a host is ejected for. See :ref:`base_ejection_time` - // for more information. If not specified, the default value (300000ms or 300s) or - // :ref:`base_ejection_time` value is applied, whatever is larger. - google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD deleted file mode 100644 index 631cd93a3964e..0000000000000 --- a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto deleted file mode 100644 index 3941c20aeb805..0000000000000 --- a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.dynamic_forward_proxy.v2alpha; - -import "envoy/api/v2/cluster.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha"; -option java_outer_classname = "DnsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.common.dynamic_forward_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamic forward proxy common configuration] - -// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview -// ` for more information. -// [#next-free-field: 7] -message DnsCacheConfig { - // The name of the cache. Multiple named caches allow independent dynamic forward proxy - // configurations to operate within a single Envoy process using different configurations. All - // configurations with the same name *must* otherwise have the same settings when referenced - // from different configuration components. Configuration will fail to load if this is not - // the case. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The DNS lookup family to use during resolution. - // - // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The - // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and - // then configures a host to have a primary and fall back address. With this, we could very - // likely build a "happy eyeballs" connection pool which would race the primary / fall back - // address and return the one that wins. This same method could potentially also be used for - // QUIC to TCP fall back.] - api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum = {defined_only: true}]; - - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: - // - // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. - google.protobuf.Duration dns_refresh_rate = 3 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The TTL for hosts that are unused. Hosts that have not been used in the configured time - // interval will be purged. If not specified defaults to 5m. - // - // .. note: - // - // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This - // means that if the configured TTL is shorter than the refresh rate the host may not be removed - // immediately. - // - // .. note: - // - // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; - - // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. - // - // .. 
note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum hosts in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - - // If the DNS failure refresh rate is specified, - // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the dns_refresh_rate. - api.v2.Cluster.RefreshRate dns_failure_refresh_rate = 6; -} diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/BUILD b/generated_api_shadow/envoy/config/common/matcher/v3/BUILD deleted file mode 100644 index 2f90ace882d93..0000000000000 --- a/generated_api_shadow/envoy/config/common/matcher/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto deleted file mode 100644 index d7deb71d0b469..0000000000000 --- a/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto +++ /dev/null @@ -1,226 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.matcher.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.matcher.v3"; -option java_outer_classname = "MatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Unified Matcher API] - -// A matcher, which may traverse a matching tree in order to result in a match action. -// During matching, the tree will be traversed until a match is found, or if no match -// is found the action specified by the most specific on_no_match will be evaluated. -// As an on_no_match might result in another matching tree being evaluated, this process -// might repeat several times until the final OnMatch (or no match) is decided. -// -// [#alpha:] -message Matcher { - // What to do if a match is successful. - message OnMatch { - oneof on_match { - option (validate.required) = true; - - // Nested matcher to evaluate. - // If the nested matcher does not match and does not specify - // on_no_match, then this matcher is considered not to have - // matched, even if a predicate at this level or above returned - // true. 
- Matcher matcher = 1; - - // Protocol-specific action to take. - core.v3.TypedExtensionConfig action = 2; - } - } - - // A linear list of field matchers. - // The field matchers are evaluated in order, and the first match - // wins. - message MatcherList { - // Predicate to determine if a match is successful. - message Predicate { - // Predicate for a single input field. - message SinglePredicate { - // Protocol-specific specification of input field to match on. - // [#extension-category: envoy.matching.common_inputs] - core.v3.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - oneof matcher { - option (validate.required) = true; - - // Built-in string matcher. - type.matcher.v3.StringMatcher value_match = 2; - - // Extension for custom matching logic. - // [#extension-category: envoy.matching.input_matchers] - core.v3.TypedExtensionConfig custom_match = 3; - } - } - - // A list of two or more matchers. Used to allow using a list within a oneof. - message PredicateList { - repeated Predicate predicate = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof match_type { - option (validate.required) = true; - - // A single predicate to evaluate. - SinglePredicate single_predicate = 1; - - // A list of predicates to be OR-ed together. - PredicateList or_matcher = 2; - - // A list of predicates to be AND-ed together. - PredicateList and_matcher = 3; - - // The invert of a predicate - Predicate not_matcher = 4; - } - } - - // An individual matcher. - message FieldMatcher { - // Determines if the match succeeds. - Predicate predicate = 1 [(validate.rules).message = {required: true}]; - - // What to do if the match succeeds. - OnMatch on_match = 2 [(validate.rules).message = {required: true}]; - } - - // A list of matchers. First match wins. - repeated FieldMatcher matchers = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message MatcherTree { - // A map of configured matchers. Used to allow using a map within a oneof. 
- message MatchMap { - map map = 1 [(validate.rules).map = {min_pairs: 1}]; - } - - // Protocol-specific specification of input field to match on. - core.v3.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - // Exact or prefix match maps in which to look up the input value. - // If the lookup succeeds, the match is considered successful, and - // the corresponding OnMatch is used. - oneof tree_type { - option (validate.required) = true; - - MatchMap exact_match_map = 2; - - // Longest matching prefix wins. - MatchMap prefix_match_map = 3; - - // Extension for custom matching logic. - core.v3.TypedExtensionConfig custom_match = 4; - } - } - - oneof matcher_type { - option (validate.required) = true; - - // A linear list of matchers to evaluate. - MatcherList matcher_list = 1; - - // A match tree to evaluate. - MatcherTree matcher_tree = 2; - } - - // Optional OnMatch to use if the matcher failed. - // If specified, the OnMatch is used, and the matcher is considered - // to have matched. - // If not specified, the matcher is considered not to have matched. - OnMatch on_no_match = 3; -} - -// Match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - // A set of match configurations used for logical operations. - message MatchSet { - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. 
- MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - // HTTP headers to match. - repeated route.v3.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. -// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. -message HttpGenericBodyMatch { - message GenericTextMatch { - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. 
- bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD deleted file mode 100644 index 3aed5a34a4002..0000000000000 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/service/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto b/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto deleted file mode 100644 index 6db1ecceddc4f..0000000000000 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.tap.v2alpha; - -import "envoy/service/tap/v2alpha/common.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.tap.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common tap extension configuration] - -// Common configuration for all tap extensions. 
-message CommonExtensionConfig { - oneof config_type { - option (validate.required) = true; - - // If specified, the tap filter will be configured via an admin handler. - AdminConfig admin_config = 1; - - // If specified, the tap filter will be configured via a static configuration that cannot be - // changed. - service.tap.v2alpha.TapConfig static_config = 2; - } -} - -// Configuration for the admin handler. See :ref:`here ` for -// more information. -message AdminConfig { - // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is - // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/BUILD b/generated_api_shadow/envoy/config/core/v3/BUILD deleted file mode 100644 index 72e10b6df8440..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/core/v3/address.proto b/generated_api_shadow/envoy/config/core/v3/address.proto deleted file mode 100644 index 06876d5f8e41e..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/address.proto +++ /dev/null @@ -1,160 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/socket_option.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Network addresses] - -message Pipe { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Pipe"; - - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. - // Paths starting with '@' will result in an error in environments other than - // Linux. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - // The mode for the Pipe. Not applicable for abstract sockets. - uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; -} - -// [#not-implemented-hide:] The address represents an envoy internal listener. -// TODO(lambdai): Make this address available for listener and endpoint. -// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. 
-message EnvoyInternalAddress { - oneof address_name_specifier { - option (validate.required) = true; - - // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. - string server_listener_name = 1; - } -} - -// [#next-free-field: 7] -message SocketAddress { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; - - enum Protocol { - TCP = 0; - UDP = 1; - } - - Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; - - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_len: 1}]; - - oneof port_specifier { - option (validate.required) = true; - - uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - string named_port = 4; - } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. 
Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. - bool ipv4_compat = 6; -} - -message TcpKeepalive { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TcpKeepalive"; - - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - google.protobuf.UInt32Value keepalive_probes = 1; - - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - google.protobuf.UInt32Value keepalive_time = 2; - - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) - google.protobuf.UInt32Value keepalive_interval = 3; -} - -message BindConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig"; - - // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; - - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. 
- google.protobuf.BoolValue freebind = 2; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated SocketOption socket_options = 3; -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. -message Address { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Address"; - - oneof address { - option (validate.required) = true; - - SocketAddress socket_address = 1; - - Pipe pipe = 2; - - // [#not-implemented-hide:] - EnvoyInternalAddress envoy_internal_address = 3; - } -} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -message CidrRange { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange"; - - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
- google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/backoff.proto b/generated_api_shadow/envoy/config/core/v3/backoff.proto deleted file mode 100644 index 3ffa97bb0299c..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/backoff.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "BackoffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Backoff Strategy] - -// Configuration defining a jittered exponential back off strategy. -message BackoffStrategy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BackoffStrategy"; - - // The base interval to be used for the next back off computation. It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto deleted file mode 100644 index 9b1ca815723b2..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ /dev/null @@ -1,456 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/backoff.proto"; -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/semantic_version.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/context_params.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common types] - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. 
-enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; -} - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -message Locality { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Locality"; - - // Region this :ref:`zone ` belongs to. - string region = 1; - - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. - string zone = 2; - - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - string sub_zone = 3; -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -message BuildVersion { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BuildVersion"; - - // SemVer version of extension. - type.v3.SemanticVersion version = 1; - - // Free-form build information. - // Envoy defines several well known keys in the source/common/version/version.h file - google.protobuf.Struct metadata = 2; -} - -// Version and identification for an Envoy extension. 
-// [#next-free-field: 6] -message Extension { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Extension"; - - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - string name = 1; - - // Category of the extension. - // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. - // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - string category = 2; - - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - string type_descriptor = 3; - - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - BuildVersion version = 4; - - // Indicates that the extension is present but was disabled via dynamic configuration. - bool disabled = 5; -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 13] -message Node { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Node"; - - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. - string id = 1; - - // Defines the local service cluster name where Envoy is running. 
Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - string cluster = 2; - - // Opaque metadata extending the node identifier. Envoy will pass this - // directly to the management server. - google.protobuf.Struct metadata = 3; - - // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike - // other fields in this message). For example, the xDS client may have a shard identifier that - // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the - // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic - // parameter then appears in this field during future discovery requests. - map dynamic_parameters = 12; - - // Locality specifying where the Envoy instance is running. - Locality locality = 4; - - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - string user_agent_name = 6; - - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; - - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } - - // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; - - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. 
- // See :ref:`the list of features ` that xDS client may - // support. - repeated string client_features = 10; - - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - repeated Address listening_addresses = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - string hidden_envoy_deprecated_build_version = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. 
-// [#next-major-version: move to type/metadata/v2] -message Metadata { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Metadata"; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // If both *filter_metadata* and - // :ref:`typed_filter_metadata ` - // fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map filter_metadata = 1; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // The value is encoded as google.protobuf.Any. - // If both :ref:`filter_metadata ` - // and *typed_filter_metadata* fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map typed_filter_metadata = 2; -} - -// Runtime derived uint32 with a default when not specified. -message RuntimeUInt32 { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeUInt32"; - - // Default value if runtime value is not available. - uint32 default_value = 2; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived percentage with a default when not specified. -message RuntimePercent { - // Default value if runtime value is not available. - type.v3.Percent default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived double with a default when not specified. -message RuntimeDouble { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeDouble"; - - // Default value if runtime value is not available. - double default_value = 1; - - // Runtime key to get value for comparison. 
This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived bool with a default when not specified. -message RuntimeFeatureFlag { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.RuntimeFeatureFlag"; - - // Default value if runtime value is not available. - google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key to get value for comparison. This value is used if defined. The boolean value must - // be represented via its - // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Header name/value pair. -message HeaderValue { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue"; - - // Header name. - string key = 1 - [(validate.rules).string = - {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [ - (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; -} - -// Header name/value pair plus option to control append behavior. -message HeaderValueOption { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HeaderValueOption"; - - // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message = {required: true}]; - - // Should the value be appended? If true (default), the value is appended to - // existing values. Otherwise it replaces any existing values. - google.protobuf.BoolValue append = 2; -} - -// Wrapper for a set of headers. 
-message HeaderMap { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderMap"; - - repeated HeaderValue headers = 1; -} - -// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename -// events inside this directory trigger the watch. -message WatchedDirectory { - // Directory path to watch. - string path = 1 [(validate.rules).string = {min_len: 1}]; -} - -// Data source consisting of either a file or an inline value. -message DataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_len: 1}]; - - // Bytes inlined in the configuration. - bytes inline_bytes = 2; - - // String inlined in the configuration. - string inline_string = 3; - } -} - -// The message specifies the retry policy of remote data source when fetching fails. -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy"; - - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - BackoffStrategy retry_back_off = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. - google.protobuf.UInt32Value num_retries = 2 - [(udpa.annotations.field_migrate).rename = "max_retries"]; -} - -// The message specifies how to fetch data from remote and how to verify it. -message RemoteDataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RemoteDataSource"; - - // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; - - // SHA256 string for verifying data. 
- string sha256 = 2 [(validate.rules).string = {min_len: 1}]; - - // Retry policy for fetching remote data. - RetryPolicy retry_policy = 3; -} - -// Async data source which support async data fetch. -message AsyncDataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.AsyncDataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local async data source. - DataSource local = 1; - - // Remote async data source. - RemoteDataSource remote = 2; - } -} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -message TransportSocket { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TransportSocket"; - - // The name of the transport socket to instantiate. The name must match a supported transport - // socket implementation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. 
For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. -message RuntimeFractionalPercent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.RuntimeFractionalPercent"; - - // Default value if the runtime value's for the numerator/denominator keys are not available. - type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key for a YAML representation of a FractionalPercent. - string runtime_key = 2; -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. -message ControlPlane { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ControlPlane"; - - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. - string identifier = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto deleted file mode 100644 index c24a0a6537d85..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ /dev/null @@ -1,216 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/authority.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Configuration sources] - -// xDS API and 
non-xDS services version. This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -enum ApiVersion { - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - AUTO = 0 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v2 API. - V2 = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v3 API. - V3 = 2; -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. -// [#next-free-field: 9] -message ApiConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ApiConfigSource"; - - // APIs may be fetched via either REST or gRPC. - enum ApiType { - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY = 0 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - - // REST-JSON v2 API. The `canonical JSON encoding - // `_ for - // the v2 protos is used. - REST = 1; - - // SotW gRPC service. - GRPC = 2; - - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - DELTA_GRPC = 3; - - // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_GRPC = 5; - - // Delta xDS gRPC with ADS. 
All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_DELTA_GRPC = 6; - } - - // API type (gRPC, REST, delta gRPC) - ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; - - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. - repeated string cluster_names = 2; - - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - repeated GrpcService grpc_services = 4; - - // For REST APIs, the delay between successive polls. - google.protobuf.Duration refresh_delay = 3; - - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; - - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. - RateLimitSettings rate_limit_settings = 6; - - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - bool set_node_on_first_message_only = 7; -} - -// Aggregated Discovery Service (ADS) options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. 
-message AggregatedConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.AggregatedConfigSource"; -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -message SelfConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. -message RateLimitSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.RateLimitSettings"; - - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - google.protobuf.UInt32Value max_tokens = 1; - - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. -// [#next-free-field: 8] -message ConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; - - // Authorities that this config source may be used for. An authority specified in a xdstp:// URL - // is resolved to a *ConfigSource* prior to configuration fetch. 
This field provides the - // association between authority name and configuration source. - // [#not-implemented-hide:] - repeated xds.core.v3.Authority authorities = 7; - - oneof config_source_specifier { - option (validate.required) = true; - - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; - - // API configuration source. - ApiConfigSource api_config_source = 2; - - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; - - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) - // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] 
- SelfConfigSource self = 5; - } - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/event_service_config.proto b/generated_api_shadow/envoy/config/core/v3/event_service_config.proto deleted file mode 100644 index b3552e3975a36..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/event_service_config.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "EventServiceConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. 
-message EventServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.EventServiceConfig"; - - oneof config_source_specifier { - option (validate.required) = true; - - // Specifies the gRPC service that hosts the event reporting service. - GrpcService grpc_service = 1; - } -} diff --git a/generated_api_shadow/envoy/config/core/v3/extension.proto b/generated_api_shadow/envoy/config/core/v3/extension.proto deleted file mode 100644 index ba66da6a8e363..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/extension.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/config_source.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ExtensionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Extension configuration] - -// Message type for extension configuration. -// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. -message TypedExtensionConfig { - // The name of an extension. This is not used to select the extension, instead - // it serves the role of an opaque identifier. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The typed config for the extension. The type URL will be used to identify - // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, - // the inner type URL of *TypedStruct* will be utilized. See the - // :ref:`extension configuration overview - // ` for further details. - google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; -} - -// Configuration source specifier for a late-bound extension configuration. 
The -// parent resource is warmed until all the initial extension configurations are -// received, unless the flag to apply the default configuration is set. -// Subsequent extension updates are atomic on a per-worker basis. Once an -// extension configuration is applied to a request or a connection, it remains -// constant for the duration of processing. If the initial delivery of the -// extension configuration fails, due to a timeout for example, the optional -// default configuration is applied. Without a default configuration, the -// extension is disabled, until an extension configuration is received. The -// behavior of a disabled extension depends on the context. For example, a -// filter chain with a disabled extension filter rejects all incoming streams. -message ExtensionConfigSource { - ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial extension configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first discovery response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - - // A set of permitted extension type URLs. Extension configuration updates are rejected - // if they do not match any type URL in the set. 
- repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto deleted file mode 100644 index e79ec24e0201f..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "GrpcMethodListProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC method list] - -// A list of gRPC methods which can be used as an allowlist, for example. -message GrpcMethodList { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList"; - - message Service { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcMethodList.Service"; - - // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The names of the gRPC methods in this service. 
- repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; - } - - repeated Service services = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto deleted file mode 100644 index b8e033da93830..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ /dev/null @@ -1,296 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC services] - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -message GrpcService { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService"; - - message EnvoyGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.EnvoyGrpc"; - - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. - // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster. - string authority = 2 - [(validate.rules).string = - {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // [#next-free-field: 9] - message GoogleGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc"; - - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - message SslCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials"; - - // PEM encoded server root certificates. - DataSource root_certs = 1; - - // PEM encoded client private key. - DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // PEM encoded client certificate chain. - DataSource cert_chain = 3; - } - - // Local channel credentials. Only UDS is supported for now. - // See https://github.com/grpc/grpc/pull/15909. - message GoogleLocalCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials"; - } - - // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call - // credential types. 
- message ChannelCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials"; - - oneof credential_specifier { - option (validate.required) = true; - - SslCredentials ssl_credentials = 1; - - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_default = 2; - - GoogleLocalCredentials local_credentials = 3; - } - } - - // [#next-free-field: 8] - message CallCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials"; - - message ServiceAccountJWTAccessCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." - "ServiceAccountJWTAccessCredentials"; - - string json_key = 1; - - uint64 token_lifetime_seconds = 2; - } - - message GoogleIAMCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; - - string authorization_token = 1; - - string authority_selector = 2; - } - - message MetadataCredentialsFromPlugin { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." - "MetadataCredentialsFromPlugin"; - - string name = 1; - - // [#extension-category: envoy.grpc_credentials] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - // Security token service configuration that allows Google gRPC to - // fetch security token from an OAuth 2.0 authorization server. - // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - // https://github.com/grpc/grpc/pull/19587. 
- // [#next-free-field: 10] - message StsService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService"; - - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - string token_exchange_service_uri = 1; - - // Location of the target service or resource where the client - // intends to use the requested security token. - string resource = 2; - - // Logical name of the target service where the client intends to - // use the requested security token. - string audience = 3; - - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - string scope = 4; - - // Type of the requested security token. - string requested_token_type = 5; - - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; - - // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; - - // The path of actor token, a security token that represents the identity - // of the acting party. The acting party is authorized to use the - // requested security token and act on behalf of the subject. - string actor_token_path = 8; - - // Type of the actor token. - string actor_token_type = 9; - } - - oneof credential_specifier { - option (validate.required) = true; - - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - string access_token = 1; - - // Google Compute Engine credentials. 
- // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_compute_engine = 2; - - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - string google_refresh_token = 3; - - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; - - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIAMCredentials google_iam = 5; - - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. - // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - MetadataCredentialsFromPlugin from_plugin = 6; - - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. - StsService sts_service = 7; - } - } - - // Channel arguments. - message ChannelArgs { - message Value { - // Pointer values are not supported, since they don't make any sense when - // delivered via the API. - oneof value_specifier { - option (validate.required) = true; - - string string_value = 1; - - int64 int_value = 2; - } - } - - // See grpc_types.h GRPC_ARG #defines for keys that work here. - map args = 1; - } - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_len: 1}]; - - ChannelCredentials channel_credentials = 2; - - // A set of call credentials that can be composed with `channel credentials - // `_. 
- repeated CallCredentials call_credentials = 3; - - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; - - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. - string credentials_factory_name = 5; - - // Additional configuration for site-specific customizations of the Google - // gRPC library. - google.protobuf.Struct config = 6; - - // How many bytes each stream can buffer internally. - // If not set an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; - - // Custom channels args. - ChannelArgs channel_args = 8; - } - - reserved 4; - - oneof target_specifier { - option (validate.required) = true; - - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc envoy_grpc = 1; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - GoogleGrpc google_grpc = 2; - } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. This can be used for - // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to - // be injected. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers - // `. 
- repeated HeaderValue initial_metadata = 5; -} diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto deleted file mode 100644 index dc7adc97a3257..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ /dev/null @@ -1,377 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/event_service_config.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/http.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health check] -// * Health checking :ref:`architecture overview `. -// * If health checking is configured for a cluster, additional statistics are emitted. They are -// documented :ref:`here `. - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. 
- DEGRADED = 5; -} - -// [#next-free-field: 25] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; - - // Describes the encoding of the payload bytes in the payload. - message Payload { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.Payload"; - - oneof payload { - option (validate.required) = true; - - // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] Binary payload. - bytes binary = 2; - } - } - - // [#next-free-field: 12] - message HttpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.HttpHealthCheck"; - - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - string path = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // [#not-implemented-hide:] HTTP specific payload. - Payload send = 3; - - // [#not-implemented-hide:] HTTP specific response. - Payload receive = 4; - - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. 
- repeated HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - repeated type.v3.Int64Range expected_statuses = 9; - - // Use specified application protocol for health checks. - type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview - // ` for more information. - type.matcher.v3.StringMatcher service_name_matcher = 11; - - string hidden_envoy_deprecated_service_name = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - bool hidden_envoy_deprecated_use_http2 = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - message TcpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.TcpHealthCheck"; - - // Empty payloads imply a connect-only health check. - Payload send = 1; - - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. 
- repeated Payload receive = 2; - } - - message RedisHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.RedisHealthCheck"; - - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; - } - - // `grpc.health.v1.Health - // `_-based - // healthcheck. See `gRPC doc `_ - // for details. - message GrpcHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.GrpcHealthCheck"; - - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - string service_name = 1; - - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - string authority = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Custom health check. - message CustomHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.CustomHealthCheck"; - - // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. 
- // [#extension-category: envoy.health_checkers] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - // Health checks occur over the transport socket specified for the cluster. This implies that if a - // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - // - // This allows overriding the cluster TLS settings, just for health check connections. - message TlsOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.TlsOptions"; - - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. - repeated string alpn_protocols = 1; - } - - reserved 10; - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true - gt {} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; - - // An optional jitter amount as a percentage of interval_ms. 
If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; - - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; - - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; - - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; - - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; - - // TCP health check. - TcpHealthCheck tcp_health_check = 9; - - // gRPC health check. - GrpcHealthCheck grpc_health_check = 11; - - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } - - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. 
Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - - // The "no traffic healthy interval" is a special health check interval that - // is used for hosts that are currently passing active health checking - // (including new hosts) when the cluster has received no traffic. - // - // This is useful for when we want to send frequent health checks with - // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once - // a host in the cluster is marked as healthy. - // - // Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. - // - // If no_traffic_healthy_interval is not set, it will default to the - // no traffic interval and send that interval regardless of health state. - google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. - // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
- google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; - - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventServiceConfig event_service = 22; - - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; - - // This allows overriding the cluster TLS settings, just for health check connections. - TlsOptions tls_options = 21; - - // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's - // :ref:`tranport socket matches `. - // For example, the following match criteria - // - // .. code-block:: yaml - // - // transport_socket_match_criteria: - // useMTLS: true - // - // Will match the following :ref:`cluster socket match ` - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "useMTLS" - // match: - // useMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the - // :ref:`LbEndpoint.Metadata `. 
- // This allows using different transport socket capabilities for health checking versus proxying to the - // endpoint. - // - // If the key/values pairs specified do not match any - // :ref:`transport socket matches `, - // the cluster's :ref:`transport socket ` - // will be used for health check socket configuration. - google.protobuf.Struct transport_socket_match_criteria = 23; -} diff --git a/generated_api_shadow/envoy/config/core/v3/http_uri.proto b/generated_api_shadow/envoy/config/core/v3/http_uri.proto deleted file mode 100644 index 5d1fc239e07ed..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/http_uri.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Service URI ] - -// Envoy external URI descriptor -message HttpUri { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpUri"; - - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - string uri = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - oneof http_upstream_type { - option (validate.required) = true; - - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. 
code-block:: yaml - // - // cluster: jwks_cluster - // - string cluster = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto deleted file mode 100644 index 8f2347eb55179..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ /dev/null @@ -1,494 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Protocol options] - -// [#not-implemented-hide:] -message TcpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.TcpProtocolOptions"; -} - -// QUIC protocol options which apply to both downstream and upstream connections. -message QuicProtocolOptions { - // Maximum number of streams that the client can negotiate per connection. 100 - // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; - - // `Initial stream-level flow-control receive window - // `_ size. Valid values range from - // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. 
If configured smaller than it, we will use 16384 instead. - // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the stream buffers. - google.protobuf.UInt32Value initial_stream_window_size = 2 - [(validate.rules).uint32 = {lte: 16777216 gte: 1}]; - - // Similar to *initial_stream_window_size*, but for connection-level - // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as *initial_stream_window_size*. - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default - // window size now, so it's also the minimum. - google.protobuf.UInt32Value initial_connection_window_size = 3 - [(validate.rules).uint32 = {lte: 25165824 gte: 1}]; -} - -message UpstreamHttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.UpstreamHttpProtocolOptions"; - - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - bool auto_sni = 1; - - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. - bool auto_san_validation = 2; -} - -// Configures the alternate protocols cache which tracks alternate protocols that can be used to -// make an HTTP connection to an origin server. 
See https://tools.ietf.org/html/rfc7838 for -// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 -// for the "HTTPS" DNS resource record. -message AlternateProtocolsCacheOptions { - // The name of the cache. Multiple named caches allow independent alternate protocols cache - // configurations to operate within a single Envoy process using different configurations. All - // alternate protocols cache options with the same name *must* be equal in all fields when - // referenced from different configuration components. Configuration will fail to load if this is - // not the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The maximum number of entries that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum entries in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; -} - -// [#next-free-field: 7] -message HttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HttpProtocolOptions"; - - // Action to take when Envoy receives client request with header names containing underscore - // characters. - // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore - // characters. - enum HeadersWithUnderscoresAction { - // Allow headers with underscores. This is the default behavior. - ALLOW = 0; - - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. 
The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - REJECT_REQUEST = 1; - - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - DROP_HEADER = 2; - } - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. - // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled for downstream connections according to the value for - // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. - google.protobuf.Duration idle_timeout = 1; - - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - google.protobuf.Duration max_connection_duration = 3; - - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. 
Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - google.protobuf.Duration max_stream_duration = 4; - - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. - // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction headers_with_underscores_action = 5; - - // Optional maximum requests for both upstream and downstream connections. - // If not specified, there is no limit. - // Setting this parameter to 1 will effectively disable keep alive. - // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. - google.protobuf.UInt32Value max_requests_per_connection = 6; -} - -// [#next-free-field: 8] -message Http1ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http1ProtocolOptions"; - - // [#next-free-field: 9] - message HeaderKeyFormat { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat"; - - message ProperCaseWords { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; - } - - oneof header_format { - option (validate.required) = true; - - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". 
- // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". - ProperCaseWords proper_case_words = 1; - - // Configuration for stateful formatter extensions that allow using received headers to - // affect the output of encoding headers. E.g., preserving case during proxying. - // [#extension-category: envoy.http.stateful_header_formatters] - TypedExtensionConfig stateful_formatter = 8; - } - } - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - google.protobuf.BoolValue allow_absolute_url = 1; - - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - bool accept_http_10 = 2; - - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - string default_host_for_http_10 = 3; - - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat header_key_format = 4; - - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. 
- bool enable_trailers = 5; - - // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding` - // headers set. By default such messages are rejected, but if option is enabled - Envoy will - // remove Content-Length header and process message. - // See `RFC7230, sec. 3.3.3 ` for details. - // - // .. attention:: - // Enabling this option might lead to request smuggling vulnerability, especially if traffic - // is proxied via multiple layers of proxies. - bool allow_chunked_length = 6; - - // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate - // HTTP/1.1 connections upon receiving an invalid HTTP message. However, - // when this option is true, then Envoy will leave the HTTP/1.1 connection - // open where possible. - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; -} - -message KeepaliveSettings { - // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. - // If this is zero, interval PINGs will not be sent. - google.protobuf.Duration interval = 1 [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // How long to wait for a response to a keepalive PING. If a response is not received within this - // time period, the connection will be aborted. - google.protobuf.Duration timeout = 2 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // A random jitter amount as a percentage of interval that will be added to each interval. - // A value of zero means there will be no jitter. - // The default value is 15%. - type.v3.Percent interval_jitter = 3; - - // If the connection has been idle for this duration, send a HTTP/2 ping ahead - // of new stream creation, to quickly detect dead connections. - // If this is zero, this type of PING will not be sent. 
- // If an interval ping is outstanding, a second ping will not be sent as the - // interval ping will determine if the connection is dead. - google.protobuf.Duration connection_idle_interval = 4 - [(validate.rules).duration = {gte {nanos: 1000000}}]; -} - -// [#next-free-field: 16] -message Http2ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http2ProtocolOptions"; - - // Defines a parameter to be sent in the SETTINGS frame. - // See `RFC7540, sec. 6.5.1 `_ for details. - message SettingsParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter"; - - // The 16 bit parameter identifier. - google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65535 gte: 0}, - (validate.rules).message = {required: true} - ]; - - // The 32 bit parameter value. - google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; - } - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - google.protobuf.UInt32Value hpack_table_size = 1; - - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. - // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). - // - // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given - // connection based on upstream settings. Config dumps will reflect the configured upper bound, - // not the per-connection negotiated limits. 
- google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; - - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. - google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Allows proxying Websocket and other upgrades over H2 connect. - bool allow_connect = 5; - - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. - bool allow_metadata = 6; - - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. 
- // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; - - // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + opened_streams) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connection the - // `opened_streams` is incremented when Envoy send the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; - - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 5 + 2 * (opened_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connections the - // `opened_streams` is incremented when Envoy sends the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to - // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. 
- // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 - [(validate.rules).uint32 = {gte: 1}]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // iff present. - // - // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; - - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. 
code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. - repeated SettingsParameter custom_settings_parameters = 13; - - // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer - // does not respond within the configured timeout, the connection will be aborted. - KeepaliveSettings connection_keepalive = 15; -} - -// [#not-implemented-hide:] -message GrpcProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcProtocolOptions"; - - Http2ProtocolOptions http2_protocol_options = 1; -} - -// A message which allows using HTTP/3. -message Http3ProtocolOptions { - QuicProtocolOptions quic_protocol_options = 1; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; -} - -// A message to control transformations to the :scheme header -message SchemeHeaderTransformation { - oneof transformation { - // Overwrite any Scheme header with the contents of this string. 
- string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}]; - } -} diff --git a/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto deleted file mode 100644 index 40b33f33ff5b3..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Proxy Protocol] - -message ProxyProtocolConfig { - enum Version { - // PROXY protocol version 1. Human readable format. - V1 = 0; - - // PROXY protocol version 2. Binary format. - V2 = 1; - } - - // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details - Version version = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v3/resolver.proto b/generated_api_shadow/envoy/config/core/v3/resolver.proto deleted file mode 100644 index 21d40425f7a6b..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/resolver.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ResolverProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Resolver] - -// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. -message DnsResolverOptions { - // Use TCP for all DNS queries instead of the default protocol UDP. 
- // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 1; - - // Do not use the default search domains; only query hostnames as-is or as aliases. - bool no_default_search_domain = 2; -} - -// DNS resolution configuration which includes the underlying dns resolver addresses and options. -message DnsResolutionConfig { - // A list of dns resolver addresses. If specified, the DNS client library will perform resolution - // via the underlying DNS resolvers. Otherwise, the default system resolvers - // (e.g., /etc/resolv.conf) will be used. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - repeated Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. - DnsResolverOptions dns_resolver_options = 2; -} diff --git a/generated_api_shadow/envoy/config/core/v3/socket_option.proto b/generated_api_shadow/envoy/config/core/v3/socket_option.proto deleted file mode 100644 index b22169b86aeb8..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/socket_option.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "SocketOptionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Socket Option ] - -// Generic socket option message. 
This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. -// [#next-free-field: 7] -message SocketOption { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketOption"; - - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } - - // An optional name to give this socket option for debugging, etc. - // Uniqueness is not required and no special meaning is assumed. - string description = 1; - - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - int64 level = 2; - - // The numeric name as passed to setsockopt - int64 name = 3; - - oneof value { - option (validate.required) = true; - - // Because many sockopts take an int value. - int64 int_value = 4; - - // Otherwise it's a byte buffer. - bytes buf_value = 5; - } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. 
- SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto deleted file mode 100644 index b2a1c5e13ee43..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto +++ /dev/null @@ -1,114 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "SubstitutionFormatStringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Substitution format string] - -// Configuration to use multiple :ref:`command operators ` -// to generate a new string in either plain text or JSON format. -// [#next-free-field: 7] -message SubstitutionFormatString { - oneof format { - option (validate.required) = true; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. 
- string text_format = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Specify a format with command operators to form a JSON string. - // Its details is described in :ref:`format dictionary`. - // Values are rendered as strings, numbers, or boolean values as appropriate. - // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). - // See the documentation for a specific command operator for details. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // - // The following JSON object would be created: - // - // .. code-block:: json - // - // { - // "status": 500, - // "message": "My error message" - // } - // - google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format_source: - // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - DataSource text_format_source = 5; - } - - // If set to true, when command operators are evaluated to null, - // - // * for ``text_format``, the output of the empty operator is changed from ``-`` to an - // empty string, so that empty values are omitted entirely. - // * for ``json_format`` the keys with null values are omitted in the output structure. - bool omit_empty_values = 3; - - // Specify a *content_type* field. 
- // If this field is not set then ``text/plain`` is used for *text_format* and - // ``application/json`` is used for *json_format*. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // content_type: "text/html; charset=UTF-8" - // - string content_type = 4; - - // Specifies a collection of Formatter plugins that can be called from the access log configuration. - // See the formatters extensions documentation for details. - // [#extension-category: envoy.formatter] - repeated TypedExtensionConfig formatters = 6; -} diff --git a/generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto b/generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto deleted file mode 100644 index 00033eabdb8af..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "UdpSocketConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UDP socket config] - -// Generic UDP socket configuration. -message UdpSocketConfig { - // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate - // more memory per socket. Received datagrams above this size will be dropped. If not set - // defaults to 1500 bytes. - google.protobuf.UInt64Value max_rx_datagram_size = 1 - [(validate.rules).uint64 = {lt: 65536 gt: 0}]; - - // Configures whether Generic Receive Offload (GRO) - // _ is preferred when reading from the - // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. - // This option affects performance but not functionality. 
If GRO is not supported by the operating - // system, non-GRO receive will be used. - google.protobuf.BoolValue prefer_gro = 2; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/BUILD b/generated_api_shadow/envoy/config/endpoint/v3/BUILD deleted file mode 100644 index 7cde9465f0911..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto deleted file mode 100644 index afcaa41134c41..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package envoy.config.endpoint.v3; - -import "envoy/config/endpoint/v3/endpoint_components.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; -option java_outer_classname = "EndpointProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Endpoint configuration] -// Endpoint discovery :ref:`architecture overview ` - -// Each route from RDS will map to a single cluster or traffic split across -// clusters using weights expressed in the RDS WeightedCluster. 
-// -// With EDS, each cluster is treated independently from a LB perspective, with -// LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. The percentage of traffic -// for each endpoint is determined by both its load_balancing_weight, and the -// load_balancing_weight of its locality. First, a locality will be selected, -// then an endpoint within that locality will be chose based on its weight. -// [#next-free-field: 6] -message ClusterLoadAssignment { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment"; - - // Load balancing policy settings. - // [#next-free-field: 6] - message Policy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ClusterLoadAssignment.Policy"; - - // [#not-implemented-hide:] - message DropOverload { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; - - // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_len: 1}]; - - // Percentage of traffic that should be dropped for the category. - type.v3.FractionalPercent drop_percentage = 2; - } - - reserved 1; - - // Action to trim the overall incoming traffic to protect the upstream - // hosts. This action allows protection in case the hosts are unable to - // recover from an outage, or unable to autoscale or unable to handle - // incoming traffic volume for any reason. - // - // At the client each category is applied one after the other to generate - // the 'actual' drop percentage on all outgoing traffic. For example: - // - // .. 
code-block:: json - // - // { "drop_overloads": [ - // { "category": "throttle", "drop_percentage": 60 } - // { "category": "lb", "drop_percentage": 50 } - // ]} - // - // The actual drop percentages applied to the traffic at the clients will be - // "throttle"_drop = 60% - // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. - // actual_outgoing_load = 20% // remaining after applying all categories. - // [#not-implemented-hide:] - repeated DropOverload drop_overloads = 2; - - // Priority levels and localities are considered overprovisioned with this - // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the fraction of healthy hosts - // multiplied by the overprovisioning factor drops below 100. - // With the default value 140(1.4), Envoy doesn't consider a priority level - // or a locality unhealthy until their percentage of healthy hosts drops - // below 72%. For example: - // - // .. code-block:: json - // - // { "overprovisioning_factor": 100 } - // - // Read more at :ref:`priority levels ` and - // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; - - // The max time until which the endpoints from this assignment can be used. - // If no new assignments are received before this time expires the endpoints - // are considered stale and should be marked unhealthy. - // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; - - bool hidden_envoy_deprecated_disable_overprovisioning = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // List of endpoints to load balance to. 
- repeated LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - // [#not-implemented-hide:] - map named_endpoints = 5; - - // Load balancing policy settings. - Policy policy = 4; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto deleted file mode 100644 index 1faf64e20c2c6..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ /dev/null @@ -1,188 +0,0 @@ -syntax = "proto3"; - -package envoy.config.endpoint.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/health_check.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; -option java_outer_classname = "EndpointComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Endpoints] - -// Upstream host identifier. -message Endpoint { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.Endpoint"; - - // The optional health check configuration. - message HealthCheckConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.Endpoint.HealthCheckConfig"; - - // Optional alternative health check port value. - // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. 
- uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; - - // By default, the host header for L7 health checks is controlled by cluster level configuration - // (see: :ref:`host ` and - // :ref:`authority `). Setting this - // to a non-empty value allows overriding the cluster level configuration for a specific - // endpoint. - string hostname = 2; - } - - // The upstream host address. - // - // .. attention:: - // - // The form of host address depends on the given cluster type. For STATIC or EDS, - // it is expected to be a direct IP address (or something resolvable by the - // specified :ref:`resolver ` - // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, - // and will be resolved via DNS. - core.v3.Address address = 1; - - // The optional health check configuration is used as configuration for the - // health checker to contact the health checked host. - // - // .. attention:: - // - // This takes into effect only for upstream clusters with - // :ref:`active health checking ` enabled. - HealthCheckConfig health_check_config = 2; - - // The hostname associated with this endpoint. This hostname is not used for routing or address - // resolution. If provided, it will be associated with the endpoint, and can be used for features - // that require a hostname, like - // :ref:`auto_host_rewrite `. - string hostname = 3; -} - -// An Endpoint that Envoy can route traffic to. -// [#next-free-field: 6] -message LbEndpoint { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint"; - - // Upstream host identifier or a named reference. - oneof host_identifier { - Endpoint endpoint = 1; - - // [#not-implemented-hide:] - string endpoint_name = 5; - } - - // Optional health status when known and supplied by EDS server. - core.v3.HealthStatus health_status = 2; - - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. 
The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. - core.v3.Metadata metadata = 3; - - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. The sum of the weights of all endpoints in the - // endpoint's locality must not exceed uint32_t maximal value (4294967295). - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; -} - -// [#not-implemented-hide:] -// A configuration for a LEDS collection. -message LedsClusterLocalityConfig { - // Configuration for the source of LEDS updates for a Locality. - core.v3.ConfigSource leds_config = 1; - - // The xDS transport protocol glob collection resource name. - // The service is only supported in delta xDS (incremental) mode. - string leds_collection_name = 2; -} - -// A group of endpoints belonging to a Locality. -// One can have multiple LocalityLbEndpoints for a locality, but this is -// generally only done if the different groups need to have different load -// balancing weights or different priorities. 
-// [#next-free-field: 9] -message LocalityLbEndpoints { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.LocalityLbEndpoints"; - - // [#not-implemented-hide:] - // A list of endpoints of a specific locality. - message LbEndpointList { - repeated LbEndpoint lb_endpoints = 1; - } - - // Identifies location of where the upstream hosts run. - core.v3.Locality locality = 1; - - // The group of endpoints belonging to the locality specified. - // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be - // deprecated and replaced by *load_balancer_endpoints*.] - repeated LbEndpoint lb_endpoints = 2; - - // [#not-implemented-hide:] - oneof lb_config { - // The group of endpoints belonging to the locality. - // [#comment:TODO(adisuissa): Once LEDS is implemented the *lb_endpoints* field - // needs to be deprecated.] - LbEndpointList load_balancer_endpoints = 7; - - // LEDS Configuration for the current locality. - LedsClusterLocalityConfig leds_cluster_locality_config = 8; - } - - // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load - // balancing weight for a locality is divided by the sum of the weights of all - // localities at the same priority level to produce the effective percentage - // of traffic for the locality. The sum of the weights of all localities at - // the same priority level must not exceed uint32_t maximal value (4294967295). - // - // Locality weights are only considered when :ref:`locality weighted load - // balancing ` is - // configured. These weights are ignored otherwise. If no weights are - // specified when locality weighted load balancing is enabled, the locality is - // assigned no load. - google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Optional: the priority for this LocalityLbEndpoints. If unspecified this will - // default to the highest priority (0). 
- // - // Under usual circumstances, Envoy will only select endpoints for the highest - // priority (0). In the event all endpoints for a particular priority are - // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the - // next highest priority group. - // - // Priorities should range from 0 (highest) to N (lowest) without skipping. - uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; - - // Optional: Per locality proximity value which indicates how close this - // locality is from the source locality. This value only provides ordering - // information (lower the value, closer it is to the source locality). - // This will be consumed by load balancing schemes that need proximity order - // to determine where to route the requests. - // [#not-implemented-hide:] - google.protobuf.UInt32Value proximity = 6; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto deleted file mode 100644 index c114fa726622d..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto +++ /dev/null @@ -1,167 +0,0 @@ -syntax = "proto3"; - -package envoy.config.endpoint.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; -option java_outer_classname = "LoadReportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Load Report] - -// These are stats Envoy reports to the management server at a frequency defined by -// :ref:`LoadStatsResponse.load_reporting_interval`. -// Stats per upstream region/zone and optionally per subzone. 
-// [#next-free-field: 9] -message UpstreamLocalityStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.UpstreamLocalityStats"; - - // Name of zone, region and optionally endpoint group these metrics were - // collected from. Zone and region names could be empty if unknown. - core.v3.Locality locality = 1; - - // The total number of requests successfully completed by the endpoints in the - // locality. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint, - // aggregated over all endpoints in the locality. - uint64 total_error_requests = 4; - - // The total number of requests that were issued by this Envoy since - // the last report. This information is aggregated over all the - // upstream endpoints in the locality. - uint64 total_issued_requests = 8; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; - - // Endpoint granularity stats information for this locality. This information - // is populated if the Server requests it by setting - // :ref:`LoadStatsResponse.report_endpoint_granularity`. - repeated UpstreamEndpointStats upstream_endpoint_stats = 7; - - // [#not-implemented-hide:] The priority of the endpoint group these metrics - // were collected from. - uint32 priority = 6; -} - -// [#next-free-field: 8] -message UpstreamEndpointStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.UpstreamEndpointStats"; - - // Upstream host address. - core.v3.Address address = 1; - - // Opaque and implementation dependent metadata of the - // endpoint. Envoy will pass this directly to the management server. - google.protobuf.Struct metadata = 6; - - // The total number of requests successfully completed by the endpoints in the - // locality. 
These include non-5xx responses for HTTP, where errors - // originate at the client and the endpoint responded successfully. For gRPC, - // the grpc-status values are those not covered by total_error_requests below. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests for this endpoint. - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint. - // For HTTP these are responses with 5xx status codes and for gRPC the - // grpc-status values: - // - // - DeadlineExceeded - // - Unimplemented - // - Internal - // - Unavailable - // - Unknown - // - DataLoss - uint64 total_error_requests = 4; - - // The total number of requests that were issued to this endpoint - // since the last report. A single TCP connection, HTTP or gRPC - // request or stream is counted as one request. - uint64 total_issued_requests = 7; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; -} - -message EndpointLoadMetricStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.EndpointLoadMetricStats"; - - // Name of the metric; may be empty. - string metric_name = 1; - - // Number of calls that finished and included this metric. - uint64 num_requests_finished_with_metric = 2; - - // Sum of metric values across all calls that finished with this metric for - // load_reporting_interval. - double total_metric_value = 3; -} - -// Per cluster load stats. Envoy reports these stats a management server in a -// :ref:`LoadStatsRequest` -// Next ID: 7 -// [#next-free-field: 7] -message ClusterStats { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.ClusterStats"; - - message DroppedRequests { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.ClusterStats.DroppedRequests"; - - // Identifier for the policy specifying the drop. 
- string category = 1 [(validate.rules).string = {min_len: 1}]; - - // Total number of deliberately dropped requests for the category. - uint64 dropped_count = 2; - } - - // The name of the cluster. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The eds_cluster_config service_name of the cluster. - // It's possible that two clusters send the same service_name to EDS, - // in that case, the management server is supposed to do aggregation on the load reports. - string cluster_service_name = 6; - - // Need at least one. - repeated UpstreamLocalityStats upstream_locality_stats = 2 - [(validate.rules).repeated = {min_items: 1}]; - - // Cluster-level stats such as total_successful_requests may be computed by - // summing upstream_locality_stats. In addition, below there are additional - // cluster-wide stats. - // - // The total number of dropped requests. This covers requests - // deliberately dropped by the drop_overload policy and circuit breaking. - uint64 total_dropped_requests = 3; - - // Information about deliberately dropped requests for each category specified - // in the DropOverload policy. - repeated DroppedRequests dropped_requests = 5; - - // Period over which the actual load report occurred. This will be guaranteed to include every - // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - // and the *LoadStatsResponse* message sent from the management server, this may be longer than - // the requested load reporting interval in the *LoadStatsResponse*. - google.protobuf.Duration load_report_interval = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/README.md b/generated_api_shadow/envoy/config/filter/README.md deleted file mode 100644 index 6ec297d6bc77b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Protocol buffer definitions for filters. 
- -Visibility of the definitions should be constrained to none except for -shared definitions between explicitly enumerated filters (e.g. accesslog and fault definitions). diff --git a/generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD b/generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD deleted file mode 100644 index f7c626ac0e5a7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto b/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto deleted file mode 100644 index 25d27bfbd1064..0000000000000 --- a/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto +++ /dev/null @@ -1,256 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.accesslog.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.accesslog.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common access log types] - -message AccessLog { - // The name of the access log implementation to instantiate. 
The name must - // match a statically registered access log. Current built-in loggers include: - // - // #. "envoy.access_loggers.file" - // #. "envoy.access_loggers.http_grpc" - // #. "envoy.access_loggers.tcp_grpc" - string name = 1; - - // Filter which is used to determine if the access log needs to be written. - AccessLogFilter filter = 2; - - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: - // - // #. "envoy.access_loggers.file": :ref:`FileAccessLog - // ` - // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig - // ` - // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig - // ` - oneof config_type { - google.protobuf.Struct config = 3 [deprecated = true]; - - google.protobuf.Any typed_config = 4; - } -} - -// [#next-free-field: 12] -message AccessLogFilter { - oneof filter_specifier { - option (validate.required) = true; - - // Status code filter. - StatusCodeFilter status_code_filter = 1; - - // Duration filter. - DurationFilter duration_filter = 2; - - // Not health check filter. - NotHealthCheckFilter not_health_check_filter = 3; - - // Traceable filter. - TraceableFilter traceable_filter = 4; - - // Runtime filter. - RuntimeFilter runtime_filter = 5; - - // And filter. - AndFilter and_filter = 6; - - // Or filter. - OrFilter or_filter = 7; - - // Header filter. - HeaderFilter header_filter = 8; - - // Response flag filter. - ResponseFlagFilter response_flag_filter = 9; - - // gRPC status filter. - GrpcStatusFilter grpc_status_filter = 10; - - // Extension filter. - ExtensionFilter extension_filter = 11; - } -} - -// Filter on an integer comparison. -message ComparisonFilter { - enum Op { - // = - EQ = 0; - - // >= - GE = 1; - - // <= - LE = 2; - } - - // Comparison operator. - Op op = 1 [(validate.rules).enum = {defined_only: true}]; - - // Value to compare against. - api.v2.core.RuntimeUInt32 value = 2; -} - -// Filters on HTTP response/status code. 
-message StatusCodeFilter { - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters on total request duration in milliseconds. -message DurationFilter { - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters for requests that are not health check requests. A health check -// request is marked by the health check filter. -message NotHealthCheckFilter { -} - -// Filters for requests that are traceable. See the tracing overview for more -// information on how a request becomes traceable. -message TraceableFilter { -} - -// Filters for random sampling of requests. -message RuntimeFilter { - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. - string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - type.FractionalPercent percent_sampled = 2; - - // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter - // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). 
- bool use_independent_randomness = 3; -} - -// Performs a logical “and” operation on the result of each filter in filters. -// Filters are evaluated sequentially and if one of them returns false, the -// filter returns false immediately. -message AndFilter { - repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// Performs a logical “or” operation on the result of each individual filter. -// Filters are evaluated sequentially and if one of them returns true, the -// filter returns true immediately. -message OrFilter { - repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; -} - -// Filters requests based on the presence or value of a request header. -message HeaderFilter { - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. - api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; -} - -// Filters requests that received responses with an Envoy response flag set. -// A list of the response flags can be found -// in the access log formatter :ref:`documentation`. -message ResponseFlagFilter { - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. - repeated string flags = 1 [(validate.rules).repeated = { - items { - string { - in: "LH" - in: "UH" - in: "UT" - in: "LR" - in: "UR" - in: "UF" - in: "UC" - in: "UO" - in: "NR" - in: "DI" - in: "FI" - in: "RL" - in: "UAEX" - in: "RLSE" - in: "DC" - in: "URX" - in: "SI" - in: "IH" - in: "DPE" - } - } - }]; -} - -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. 
-message GrpcStatusFilter { - enum Status { - OK = 0; - CANCELED = 1; - UNKNOWN = 2; - INVALID_ARGUMENT = 3; - DEADLINE_EXCEEDED = 4; - NOT_FOUND = 5; - ALREADY_EXISTS = 6; - PERMISSION_DENIED = 7; - RESOURCE_EXHAUSTED = 8; - FAILED_PRECONDITION = 9; - ABORTED = 10; - OUT_OF_RANGE = 11; - UNIMPLEMENTED = 12; - INTERNAL = 13; - UNAVAILABLE = 14; - DATA_LOSS = 15; - UNAUTHENTICATED = 16; - } - - // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. - bool exclude = 2; -} - -// Extension filter is statically registered at runtime. -message ExtensionFilter { - // The name of the filter implementation to instantiate. The name must - // match a statically registered filter. - string name = 1; - - // Custom configuration that depends on the filter being instantiated. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} diff --git a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto deleted file mode 100644 index 2e35bb7f7c5b3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.dubbo.router.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.dubbo_proxy.router.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Router] -// Dubbo router :ref:`configuration overview `. - -message Router { -} diff --git a/generated_api_shadow/envoy/config/filter/fault/v2/BUILD b/generated_api_shadow/envoy/config/filter/fault/v2/BUILD deleted file mode 100644 index 29613b4c3487b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/fault/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto b/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto deleted file mode 100644 index 016140d10f84a..0000000000000 --- a/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.fault.v2; - -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.fault.v2"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.common.fault.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common fault injection types] - -// Delay specification is used to inject latency into the -// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. -// [#next-free-field: 6] -message FaultDelay { - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } - - // Fault delays are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderDelay { - } - - reserved 2; - - // Unused and deprecated. Will be removed in the next release. - FaultDelayType type = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - oneof fault_delay_secifier { - option (validate.required) = true; - - // Add a fixed delay before forwarding the operation upstream. 
See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; - - // Fault delays are controlled via an HTTP header (if applicable). - HeaderDelay header_delay = 5; - } - - // The percentage of operations/connections/requests on which the delay will be injected. - type.FractionalPercent percentage = 4; -} - -// Describes a rate limit to be applied. -message FaultRateLimit { - // Describes a fixed/constant rate limit. - message FixedLimit { - // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; - } - - // Rate limits are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderLimit { - } - - oneof limit_type { - option (validate.required) = true; - - // A fixed rate limit. - FixedLimit fixed_limit = 1; - - // Rate limits are controlled via an HTTP header (if applicable). - HeaderLimit header_limit = 3; - } - - // The percentage of operations/connections/requests on which the rate limit will be injected. - type.FractionalPercent percentage = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD deleted file mode 100644 index 2ffbc958786b3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto deleted file mode 100644 index 6860b6d6ef2b4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto +++ /dev/null @@ -1,94 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.adaptive_concurrency.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha"; -option java_outer_classname = "AdaptiveConcurrencyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.adaptive_concurrency.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Adaptive Concurrency] -// Adaptive Concurrency Control :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.adaptive_concurrency] - -// Configuration parameters for the gradient controller. -message GradientControllerConfig { - // Parameters controlling the periodic recalculation of the concurrency limit from sampled request - // latencies. - message ConcurrencyLimitCalculationParams { - // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000. 
- google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The period of time samples are taken to recalculate the concurrency limit. - google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; - } - - // Parameters controlling the periodic minRTT recalculation. - // [#next-free-field: 6] - message MinimumRTTCalculationParams { - // The time interval between recalculating the minimum request round-trip time. - google.protobuf.Duration interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The number of requests to aggregate/sample during the minRTT recalculation window before - // updating. Defaults to 50. - google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}]; - - // Randomized time delta that will be introduced to the start of the minRTT calculation window. - // This is represented as a percentage of the interval duration. Defaults to 15%. - // - // Example: If the interval is 10s and the jitter is 15%, the next window will begin - // somewhere in the range (10s - 11.5s). - type.Percent jitter = 3; - - // The concurrency limit set while measuring the minRTT. Defaults to 3. - google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}]; - - // Amount added to the measured minRTT to add stability to the concurrency limit during natural - // variability in latency. This is expressed as a percentage of the measured value and can be - // adjusted to allow more or less tolerance to the sampled latency values. - // - // Defaults to 25%. - type.Percent buffer = 5; - } - - // The percentile to use when summarizing aggregated samples. Defaults to p50. 
- type.Percent sample_aggregate_percentile = 1; - - ConcurrencyLimitCalculationParams concurrency_limit_params = 2 - [(validate.rules).message = {required: true}]; - - MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}]; -} - -message AdaptiveConcurrency { - oneof concurrency_controller_config { - option (validate.required) = true; - - // Gradient concurrency control will be used. - GradientControllerConfig gradient_controller_config = 1 - [(validate.rules).message = {required: true}]; - } - - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - api.v2.core.RuntimeFeatureFlag enabled = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto b/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto deleted file mode 100644 index 43823286286a3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.aws_lambda.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha"; -option java_outer_classname = "AwsLambdaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.aws_lambda.v3"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: AWS Lambda] -// AWS Lambda :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_lambda] - -// AWS Lambda filter config -message Config { - enum InvocationMode { - // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In - // this mode the output of the Lambda function becomes the response of the HTTP request. - SYNCHRONOUS = 0; - - // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be - // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the - // call which is translated to an HTTP 200 OK by the filter. 
- ASYNCHRONOUS = 1; - } - - // The ARN of the AWS Lambda to invoke when the filter is engaged - // Must be in the following format: - // arn::lambda:::function: - string arn = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether to transform the request (headers and body) to a JSON payload or pass it as is. - bool payload_passthrough = 2; - - // Determines the way to invoke the Lambda function. - InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; -} - -// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different -// version of the same Lambda depending on the route. -message PerRouteConfig { - Config invoke_config = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto b/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto deleted file mode 100644 index 5ebb92c01dfa8..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.aws_request_signing.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha"; -option java_outer_classname = "AwsRequestSigningProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.aws_request_signing.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: AwsRequestSigning] -// AwsRequestSigning :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_request_signing] - -// Top level configuration for the AWS request signing filter. -message AwsRequestSigning { - // The `service namespace - // `_ - // of the HTTP endpoint. - // - // Example: s3 - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The `region `_ hosting the HTTP - // endpoint. - // - // Example: us-west-2 - string region = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Indicates that before signing headers, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. 
- // - // Note: this rewrite affects both signing and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for signing whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto b/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto deleted file mode 100644 index 56961d22fe092..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.buffer.v2; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; -option java_outer_classname = "BufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.buffer.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Buffer] -// Buffer :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.buffer] - -message Buffer { - reserved 2; - - // The maximum request size that the filter will buffer before the connection - // manager will stop buffering and return a 413 response. - google.protobuf.UInt32Value max_request_bytes = 1 - [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}]; -} - -message BufferPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Override the global configuration of the filter with this new config. - Buffer buffer = 2 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD deleted file mode 100644 index 5cbf4e821fc81..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto deleted file mode 100644 index 98035c05d45a6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto +++ /dev/null @@ -1,77 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.cache.v2alpha; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.cache.v2alpha"; -option java_outer_classname = "CacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.cache.v3alpha"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP Cache Filter] -// [#extension: envoy.filters.http.cache] - -message CacheConfig { - // [#not-implemented-hide:] - // Modifies cache key creation by restricting which parts of the URL are included. - message KeyCreatorParams { - // If true, exclude the URL scheme from the cache key. Set to true if your origins always - // produce the same response for http and https requests. - bool exclude_scheme = 1; - - // If true, exclude the host from the cache key. Set to true if your origins' responses don't - // ever depend on host. 
- bool exclude_host = 2; - - // If *query_parameters_included* is nonempty, only query parameters matched - // by one or more of its matchers are included in the cache key. Any other - // query params will not affect cache lookup. - repeated api.v2.route.QueryParameterMatcher query_parameters_included = 3; - - // If *query_parameters_excluded* is nonempty, query parameters matched by one - // or more of its matchers are excluded from the cache key (even if also - // matched by *query_parameters_included*), and will not affect cache lookup. - repeated api.v2.route.QueryParameterMatcher query_parameters_excluded = 4; - } - - // Config specific to the cache storage implementation. - google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - - // List of matching rules that defines allowed *Vary* headers. - // - // The *vary* response header holds a list of header names that affect the - // contents of a response, as described by - // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. - // - // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't matched by any rules in - // *allowed_vary_headers*, that response will not be cached. - // - // During lookup, *allowed_vary_headers* controls what request headers will be - // sent to the cache storage implementation. - repeated type.matcher.StringMatcher allowed_vary_headers = 2; - - // [#not-implemented-hide:] - // - // - // Modifies cache key creation by restricting which parts of the URL are included. - KeyCreatorParams key_creator_params = 3; - - // [#not-implemented-hide:] - // - // - // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache - // storage implementation may have its own limit beyond which it will reject insertions). 
- uint32 max_body_bytes = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto b/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto deleted file mode 100644 index d62d0d7a42fab..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.compressor.v2; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.compressor.v2"; -option java_outer_classname = "CompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.compressor.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Compressor] - -// [#next-free-field: 6] -message Compressor { - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 1; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. 
When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 2; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 3; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // .. attention: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 4; - - // Runtime flag that controls whether the filter is enabled or not. If set to false, the - // filter will operate as a pass-through filter. If not specified, defaults to enabled. - api.v2.core.RuntimeFeatureFlag runtime_enabled = 5; -} diff --git a/generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto b/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto deleted file mode 100644 index 9060a9c38fda1..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.cors.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.cors.v2"; -option java_outer_classname = "CorsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cors.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Cors] -// CORS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.cors] - -// Cors filter config. -message Cors { -} diff --git a/generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD deleted file mode 100644 index aaab1df155473..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto b/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto deleted file mode 100644 index 3c2c9110e9fe0..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.csrf.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/string.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; -option java_outer_classname = "CsrfProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.csrf.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: CSRF] -// Cross-Site Request Forgery :ref:`configuration overview `. -// [#extension: envoy.filters.http.csrf] - -// CSRF filter config. -message CsrfPolicy { - // Specifies the % of requests for which the CSRF filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - api.v2.core.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message = {required: true}]; - - // Specifies that CSRF policies will be evaluated and tracked, but not enforced. - // - // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
- // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* and *Destination* to determine if it's valid, but will not - // enforce any policies. - api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; - - // Specifies additional source origins that will be allowed in addition to - // the destination origin. - // - // More information on how this can be configured via runtime can be found - // :ref:`here `. - repeated type.matcher.StringMatcher additional_origins = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD deleted file mode 100644 index 25c228fd56093..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto deleted file mode 100644 index 436bb6bf46160..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.dynamic_forward_proxy.v2alpha; - -import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha"; -option java_outer_classname = "DynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.dynamic_forward_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamic forward proxy] - -// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.filters.http.dynamic_forward_proxy] -message FilterConfig { - // The DNS cache configuration that the filter will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy cluster configuration - // `. - common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} - -// Per route Configuration for the dynamic forward proxy HTTP filter. 
-message PerRouteConfig { - oneof host_rewrite_specifier { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for DNS lookups whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite = 1 [(udpa.annotations.field_migrate).rename = "host_rewrite_literal"]; - - // Indicates that before DNS lookup, the host header will be swapped with - // the value of this header. If not set or empty, the original host header - // value will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite header ` - // given that the value set here would be used for DNS lookups whereas the value set in the HCM - // would be used for host header forwarding which is not the desired outcome. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string auto_host_rewrite_header = 2 - [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; - } -} diff --git a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto b/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto deleted file mode 100644 index 011d22f768c8c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.dynamo.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.dynamo.v2"; -option java_outer_classname = "DynamoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamo.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamo] -// Dynamo :ref:`configuration overview `. -// [#extension: envoy.filters.http.dynamo] - -// Dynamo filter config. -message Dynamo { -} diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD deleted file mode 100644 index 74e703c963cb6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto deleted file mode 100644 index b9a807d82edb2..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ /dev/null @@ -1,234 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.ext_authz.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/grpc_service.proto"; -import "envoy/api/v2/core/http_uri.proto"; -import "envoy/type/http_status.proto"; -import "envoy/type/matcher/string.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.ext_authz.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: External Authorization] -// External Authorization :ref:`configuration overview `. -// [#extension: envoy.filters.http.ext_authz] - -// [#next-free-field: 12] -message ExtAuthz { - // External authorization service configuration. - oneof services { - // gRPC service configuration (default timeout: 200ms). - api.v2.core.GrpcService grpc_service = 1; - - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } - - // Changes filter's behaviour on errors: - // - // 1. 
When set to true, the filter will *accept* client request even if the communication with - // the authorization service has failed, or if the authorization service has returned a HTTP 5xx - // error. - // - // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* - // response if the communication with the authorization service has failed, or if the - // authorization service has returned a HTTP 5xx error. - // - // Note that errors can be *always* tracked in the :ref:`stats - // `. - bool failure_mode_allow = 2; - - // [#not-implemented-hide: Support for this field has been removed.] - bool use_alpha = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Enables filter to buffer the client request body and send it within the authorization request. - // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization - // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; - - // Clears route cache in order to allow the external authorization service to correctly affect - // routing decisions. Filter clears all cached routes when: - // - // 1. The field is set to *true*. - // - // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. - // - // 3. At least one *authorization response header* is added to the client request, or is used for - // altering another client request header. - // - bool clear_route_cache = 6; - - // Sets the HTTP status that is returned to the client when there is a network error between the - // filter and the authorization server. The default status is HTTP 403 Forbidden. - type.HttpStatus status_on_error = 7; - - // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service as an opaque *protobuf::Struct*. 
- // - // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata - // ` is set, - // then the following will pass the jwt payload to the authorization server. - // - // .. code-block:: yaml - // - // metadata_context_namespaces: - // - envoy.filters.http.jwt_authn - // - repeated string metadata_context_namespaces = 8; - - // Specifies if the filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // If this field is not specified, the filter will be enabled for all requests. - api.v2.core.RuntimeFractionalPercent filter_enabled = 9; - - // Specifies whether to deny the requests, when the filter is disabled. - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to determine whether to deny request for - // filter protected path at filter disabling. If filter is disabled in - // typed_per_filter_config for the path, requests will not be denied. - // - // If this field is not specified, all requests will be allowed when disabled. - api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 10; -} - -// Configuration for buffering the request data. -message BufferSettings { - // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return - // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number - // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow - // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; - - // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. 
- // The authorization request will be dispatched and no 413 HTTP error will be returned by the - // filter. - bool allow_partial_message = 2; -} - -// HttpService is used for raw HTTP communication between the filter and the authorization service. -// When configured, the filter will parse the client request and use these attributes to call the -// authorization server. Depending on the response, the filter may reject or accept the client -// request. Note that in any of these events, metadata can be added, removed or overridden by the -// filter: -// -// *On authorization request*, a list of allowed request headers may be supplied. See -// :ref:`allowed_headers -// ` -// for details. Additional headers metadata may be added to the authorization request. See -// :ref:`headers_to_add -// ` for -// details. -// -// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and -// additional headers metadata may be added to the original client request. See -// :ref:`allowed_upstream_headers -// ` -// for details. -// -// On other authorization response statuses, the filter will not allow traffic. Additional headers -// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers -// ` -// for details. -// [#next-free-field: 9] -message HttpService { - reserved 3, 4, 5, 6; - - // Sets the HTTP server URI which the authorization requests must be sent to. - api.v2.core.HttpUri server_uri = 1; - - // Sets a prefix to the value of authorization request header *Path*. - string path_prefix = 2; - - // Settings used for controlling authorization request metadata. - AuthorizationRequest authorization_request = 7; - - // Settings used for controlling authorization response metadata. - AuthorizationResponse authorization_response = 8; -} - -message AuthorizationRequest { - // Authorization request will include the client request headers that have a correspondent match - // in the :ref:`list `. 
Note that in addition to the - // user's supplied matchers: - // - // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. - // - // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have - // a message body. However, the authorization request can include the buffered client request body - // (controlled by :ref:`with_request_body - // ` setting), - // consequently the value of *Content-Length* of the authorization request reflects the size of - // its payload size. - // - type.matcher.ListStringMatcher allowed_headers = 1; - - // Sets a list of headers that will be included to the request to authorization service. Note that - // client request of the same key will be overridden. - repeated api.v2.core.HeaderValue headers_to_add = 2; -} - -message AuthorizationResponse { - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the original client request. - // Note that coexistent headers will be overridden. - type.matcher.ListStringMatcher allowed_upstream_headers = 1; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that when this list is *not* set, all the authorization response headers, except *Authority - // (Host)* will be in the response to the client. When a header is included in this list, *Path*, - // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - type.matcher.ListStringMatcher allowed_client_headers = 2; -} - -// Extra settings on a per virtualhost/route/weighted-cluster level. -message ExtAuthzPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the ext auth filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. 
- bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; - } -} - -// Extra settings for the check request. You can use this to provide extra context for the -// external authorization server on specific virtual hosts \ routes. For example, adding a context -// extension on the virtual host level can give the ext-authz server information on what virtual -// host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be used. -message CheckSettings { - // Context extensions to set on the CheckRequest's - // :ref:`AttributeContext.context_extensions` - // - // Merge semantics for this field are such that keys from more specific configs override. - // - // .. note:: - // - // These settings are only applied to a filter configured with a - // :ref:`grpc_service`. - map context_extensions = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD deleted file mode 100644 index df4feab714ff4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto b/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto deleted file mode 100644 index cb99b0d71bbdc..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.fault.v2; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/config/filter/fault/v2/fault.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.fault.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Fault Injection] -// Fault Injection :ref:`configuration overview `. -// [#extension: envoy.filters.http.fault] - -message FaultAbort { - // Fault aborts are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderAbort { - } - - reserved 1; - - oneof error_type { - option (validate.required) = true; - - // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // Fault aborts are controlled via an HTTP header (if applicable). 
- HeaderAbort header_abort = 4; - } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. - type.FractionalPercent percentage = 3; -} - -// [#next-free-field: 14] -message HTTPFault { - // If specified, the filter will inject delays based on the values in the - // object. - filter.fault.v2.FaultDelay delay = 1; - - // If specified, the filter will abort requests based on the values in - // the object. At least *abort* or *delay* must be specified. - FaultAbort abort = 2; - - // Specifies the name of the (destination) upstream cluster that the - // filter should match on. Fault injection will be restricted to requests - // bound to the specific upstream cluster. - string upstream_cluster = 3; - - // Specifies a set of headers that the filter should match on. The fault - // injection filter can be applied selectively to requests that match a set of - // headers specified in the fault filter config. The chances of actual fault - // injection further depend on the value of the :ref:`percentage - // ` field. - // The filter will check the request's headers against all the specified - // headers in the filter config. A match will happen if all the headers in the - // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). - repeated api.v2.route.HeaderMatcher headers = 4; - - // Faults are injected for the specified list of downstream hosts. If this - // setting is not set, faults are injected for all downstream nodes. - // Downstream node name is taken from :ref:`the HTTP - // x-envoy-downstream-service-node - // ` header and compared - // against downstream_nodes list. - repeated string downstream_nodes = 5; - - // The maximum number of faults that can be active at a single time via the configured fault - // filter. 
Note that because this setting can be overridden at the route level, it's possible - // for the number of active faults to be greater than this value (if injected via a different - // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. - // - // .. attention:: - // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy - // limit. It's possible for the number of active faults to rise slightly above the configured - // amount due to the implementation details. - google.protobuf.UInt32Value max_active_faults = 6; - - // The response rate limit to be applied to the response body of the stream. When configured, - // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent - // ` runtime key. - // - // .. attention:: - // This is a per-stream limit versus a connection level limit. This means that concurrent streams - // will each get an independent limit. - filter.fault.v2.FaultRateLimit response_rate_limit = 7; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_delay_percent - string delay_percent_runtime = 8; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.abort_percent - string abort_percent_runtime = 9; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_duration_ms - string delay_duration_runtime = 10; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.http_status - string abort_http_status_runtime = 11; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.max_active_faults - string max_active_faults_runtime = 12; - - // The runtime key to override the :ref:`default ` - // runtime. 
The default is: fault.http.rate_limit.response_percent - string response_rate_limit_percent_runtime = 13; -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto deleted file mode 100644 index b4331dad5031c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_http1_bridge.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_http1_bridge.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC HTTP/1.1 Bridge] -// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_http1_bridge] - -// gRPC HTTP/1.1 Bridge filter config. 
-message Config { -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto deleted file mode 100644 index 8b916d327e194..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] -// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.grpc_http1_reverse_bridge] - -// gRPC reverse bridge filter configuration -message FilterConfig { - // The content-type to pass to the upstream when the gRPC bridge filter is applied. 
- // The filter will also validate that the upstream responds with the same content type. - string content_type = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If true, Envoy will assume that the upstream doesn't understand gRPC frames and - // strip the gRPC frame from the request, and add it back in to the response. This will - // hide the gRPC semantics from the upstream, allowing it to receive and respond with a - // simple binary encoded protobuf. - bool withhold_grpc_frames = 2; -} - -// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. -message FilterConfigPerRoute { - // If true, disables gRPC reverse bridge filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto deleted file mode 100644 index fea48e6bb64f9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_stats.v2alpha; - -import "envoy/api/v2/core/grpc_method_list.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_stats.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC statistics] gRPC statistics filter -// :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_stats] - -// gRPC statistics filter configuration -message FilterConfig { - // If true, the filter maintains a filter state object with the request and response message - // counts. - bool emit_filter_state = 1; - - oneof per_method_stat_specifier { - // If set, specifies an allowlist of service/methods that will have individual stats - // emitted for them. Any call that does not match the allowlist will be counted - // in a stat with no method specifier: `cluster..grpc.*`. - api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2; - - // If set to true, emit stats for all service/method names. 
- // - // If set to false, emit stats for all service/message types to the same stats without including - // the service/method in the name, with prefix `cluster..grpc`. This can be useful if - // service/method granularity is not needed, or if each cluster only receives a single method. - // - // .. attention:: - // This option is only safe if all clients are trusted. If this option is enabled - // with untrusted clients, the clients could cause unbounded growth in the number of stats in - // Envoy, using unbounded memory and potentially slowing down stats pipelines. - // - // .. attention:: - // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the - // behavior will default to `stats_for_all_methods=false`. This default value is changed due - // to the previous value being deprecated. This behavior can be changed with runtime override - // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. - google.protobuf.BoolValue stats_for_all_methods = 3; - } -} - -// gRPC statistics filter state object in protobuf form. -message FilterObject { - // Count of request messages in the request stream. - uint64 request_message_count = 1; - - // Count of response messages in the response stream. - uint64 response_message_count = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto b/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto deleted file mode 100644 index be23b4d87b585..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_web.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_web.v2"; -option java_outer_classname = "GrpcWebProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_web.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Web] -// gRPC Web :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_web] - -// gRPC Web filter config. -message GrpcWeb { -} diff --git a/generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD deleted file mode 100644 index 9cb0d12934218..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/http/compressor/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto b/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto deleted file mode 100644 index 0c134c6208b15..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto +++ /dev/null @@ -1,96 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.gzip.v2; - -import "envoy/config/filter/http/compressor/v2/compressor.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.gzip.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Gzip] - -// [#next-free-field: 11] -message Gzip { - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } - - message CompressionLevel { - enum Enum { - DEFAULT = 0; - BEST = 1; - SPEED = 2; - } - } - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. 
- google.protobuf.UInt32Value content_length = 2 [deprecated = true]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST" provides higher compression at the cost of - // higher latency, "SPEED" provides lower compression with minimum impact on response time. - // "DEFAULT" provides an optimal result between speed and compression. This field will be set to - // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though - // there are situations which changing this parameter might produce better results. For example, - // run-length encoding (RLE) is typically used when the content is known for having sequences - // which same data occurs many consecutive times. For more information about each strategy, please - // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - repeated string content_type = 6 [deprecated = true]; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. 
- bool disable_on_etag_header = 7 [deprecated = true]; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - bool remove_accept_encoding_header = 8 [deprecated = true]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Set of configuration parameters common for all compression filters. If this field is set then - // the fields `content_length`, `content_type`, `disable_on_etag_header` and - // `remove_accept_encoding_header` are ignored. - compressor.v2.Compressor compressor = 10; -} diff --git a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto deleted file mode 100644 index 30de69d98b1c3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ /dev/null @@ -1,100 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.header_to_metadata.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2"; -option java_outer_classname = "HeaderToMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.header_to_metadata.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Header-To-Metadata Filter] -// -// The configuration for transforming headers into metadata. This is useful -// for matching load balancer subsets, logging, etc. -// -// Header to Metadata :ref:`configuration overview `. -// [#extension: envoy.filters.http.header_to_metadata] - -message Config { - enum ValueType { - STRING = 0; - - NUMBER = 1; - - // The value is a serialized `protobuf.Value - // `_. - PROTOBUF_VALUE = 2; - } - - // ValueEncode defines the encoding algorithm. - enum ValueEncode { - // The value is not encoded. - NONE = 0; - - // The value is encoded in `Base64 `_. - // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the - // non-ASCII characters in the header. 
- BASE64 = 1; - } - - // [#next-free-field: 6] - message KeyValuePair { - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The value to pair with the given key. - // - // When used for a `on_header_present` case, if value is non-empty it'll be used - // instead of the header value. If both are empty, no metadata is added. - // - // When used for a `on_header_missing` case, a non-empty value must be provided - // otherwise no metadata is added. - string value = 3; - - // The value's type — defaults to string. - ValueType type = 4; - - // How is the value encoded, default is NONE (not encoded). - // The value will be decoded accordingly before storing to metadata. - ValueEncode encode = 5; - } - - // A Rule defines what metadata to apply when a header is present or missing. - message Rule { - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If the header is present, apply this metadata KeyValuePair. - // - // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. - KeyValuePair on_header_present = 2; - - // If the header is not present, apply this metadata KeyValuePair. - // - // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. - KeyValuePair on_header_missing = 3; - - // Whether or not to remove the header after a rule is applied. - // - // This prevents headers from leaking. - bool remove = 4; - } - - // The list of rules to apply to requests. - repeated Rule request_rules = 1; - - // The list of rules to apply to responses. 
- repeated Rule response_rules = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD deleted file mode 100644 index 22fc8fd458e61..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto deleted file mode 100644 index 7f2a486b26188..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.health_check.v2; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.health_check.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health check] -// Health check :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.health_check] - -// [#next-free-field: 6] -message HealthCheck { - reserved 2; - - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; - - // If operating in pass through mode, the amount of time in milliseconds - // that the filter should cache the upstream response. - google.protobuf.Duration cache_time = 3; - - // If operating in non-pass-through mode, specifies a set of upstream cluster - // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. - // - // .. note:: - // - // This value is interpreted as an integer by truncating, so 12.50% will be calculated - // as if it were 12%. - map cluster_min_healthy_percentages = 4; - - // Specifies a set of health check request headers to match on. The health check filter will - // check a request’s headers against all the specified headers. To specify the health check - // endpoint, set the ``:path`` header to match on. - repeated api.v2.route.HeaderMatcher headers = 5; -} diff --git a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto deleted file mode 100644 index f99b18a12c716..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.ip_tagging.v2; - -import "envoy/api/v2/core/address.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2"; -option java_outer_classname = "IpTaggingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.ip_tagging.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: IP tagging] -// IP tagging :ref:`configuration overview `. -// [#extension: envoy.filters.http.ip_tagging] - -message IPTagging { - // The type of requests the filter should apply to. The supported types - // are internal, external or both. The - // :ref:`x-forwarded-for` header is - // used to determine if a request is internal and will result in - // :ref:`x-envoy-internal` - // being set. The filter defaults to both, and it will apply to all request types. - enum RequestType { - // Both external and internal requests will be tagged. This is the default value. - BOTH = 0; - - // Only internal requests will be tagged. - INTERNAL = 1; - - // Only external requests will be tagged. - EXTERNAL = 2; - } - - // Supplies the IP tag name and the IP address subnets. 
- message IPTag { - // Specifies the IP tag name to apply. - string ip_tag_name = 1; - - // A list of IP address subnets that will be tagged with - // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated api.v2.core.CidrRange ip_list = 2; - } - - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. - // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] - // The set of IP tags for the filter. - repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD deleted file mode 100644 index 1e485f4e158ab..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md deleted file mode 100644 index d253c3f234a88..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# JWT Authentication HTTP filter config - -## Overview - -1. The proto file in this folder defines an HTTP filter config for "jwt_authn" filter. - -2. This filter will verify the JWT in the HTTP request as: - - The signature should be valid - - JWT should not be expired - - Issuer and audiences are valid and specified in the filter config. - -3. 
[JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter. - -3. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message. - -## The locations to extract JWT - -JWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header: -``` -Authorization: Bearer -``` -The next default location is in the query parameter as: -``` -?access_token= -``` - -If a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT. - -## HTTP header to pass successfully verified JWT - -If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64url-encoded JWT payload in JSON. - - -## Further header options - -In addition to the `name` field, which specifies the HTTP header name, -the `from_headers` section can specify an optional `value_prefix` value, as in: - -```yaml - from_headers: - - name: bespoke - value_prefix: jwt_value -``` - -The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`. - -Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped, -and all following, contiguous, JWT-legal chars will be taken as the JWT. - -This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`: - -```text -bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk - -bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"} - -bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234 -``` - -The header `name` may be `Authorization`. 
- -The `value_prefix` must match exactly, i.e., case-sensitively. -If the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token. - -If there are no JWT-legal characters after the `value_prefix`, the entire string after it -is taken to be the JWT token. This is unlikely to succeed; the error will reported by the JWT parser. diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto deleted file mode 100644 index 07044f92201e9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ /dev/null @@ -1,500 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.jwt_authn.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/http_uri.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.jwt_authn.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: JWT Authentication] -// JWT Authentication :ref:`configuration overview `. -// [#extension: envoy.filters.http.jwt_authn] - -// Please see following for JWT authentication flow: -// -// * `JSON Web Token (JWT) `_ -// * `The OAuth 2.0 Authorization Framework `_ -// * `OpenID Connect `_ -// -// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: -// -// * issuer: the principal that issues the JWT. It has to match the one from the token. 
-// * allowed audiences: the ones in the token have to be listed here. -// * how to fetch public key JWKS to verify the token signature. -// * how to extract JWT token in the request. -// * how to pass successfully verified token payload. -// -// Example: -// -// .. code-block:: yaml -// -// issuer: https://example.com -// audiences: -// - bookstore_android.apps.googleusercontent.com -// - bookstore_web.apps.googleusercontent.com -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// cache_duration: -// seconds: 300 -// -// [#next-free-field: 10] -message JwtProvider { - // Specify the `principal `_ that issued - // the JWT, usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - // - string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The list of JWT `audiences `_ are - // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, - // will not check audiences in the token. - // - // Example: - // - // .. code-block:: yaml - // - // audiences: - // - bookstore_android.apps.googleusercontent.com - // - bookstore_web.apps.googleusercontent.com - // - repeated string audiences = 2; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; - - // JWKS is in local data source. 
It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - api.v2.core.DataSource local_jwks = 4; - } - - // If false, the JWT is removed in the request after a success verification. If true, the JWT is - // not removed in the request. Default value is false. - bool forward = 5; - - // Two fields below define where to extract the JWT from an HTTP request. - // - // If no explicit location is specified, the following default locations are tried in order: - // - // 1. The Authorization header using the `Bearer schema - // `_. Example:: - // - // Authorization: Bearer . - // - // 2. `access_token `_ query parameter. - // - // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations - // its provider specified or from the default locations. - // - // Specify the HTTP headers to extract JWT token. For examples, following config: - // - // .. code-block:: yaml - // - // from_headers: - // - name: x-goog-iap-jwt-assertion - // - // can be used to extract token from header:: - // - // ``x-goog-iap-jwt-assertion: ``. - // - repeated JwtHeader from_headers = 6; - - // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_params: - // - jwt_token - // - // The JWT format in query parameter is:: - // - // /path?jwt_token= - // - repeated string from_params = 7; - - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. 
- string forward_payload_header = 8; - - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; -} - -// This message specifies how to fetch JWKS from remote and how to cache it. -message RemoteJwks { - // The HTTP URI to fetch the JWKS. For example: - // - // .. code-block:: yaml - // - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // - api.v2.core.HttpUri http_uri = 1; - - // Duration after which the cached JWKS should be expired. If not specified, default cache - // duration is 5 minutes. - google.protobuf.Duration cache_duration = 2; -} - -// This message specifies a header location to extract JWT token. -message JwtHeader { - // The HTTP header name. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The value prefix. The value format is "value_prefix" - // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the - // end. - string value_prefix = 2; -} - -// Specify a required provider with audiences. -message ProviderWithAudiences { - // Specify a required provider name. - string provider_name = 1; - - // This field overrides the one specified in the JwtProvider. - repeated string audiences = 2; -} - -// This message specifies a Jwt requirement. An empty message means JWT verification is not -// required. 
Here are some config examples: -// -// .. code-block:: yaml -// -// # Example 1: not required with an empty message -// -// # Example 2: require A -// provider_name: provider-A -// -// # Example 3: require A or B -// requires_any: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 4: require A and B -// requires_all: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 5: require A and (B or C) -// requires_all: -// requirements: -// - provider_name: provider-A -// - requires_any: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 6: require A or (B and C) -// requires_any: -// requirements: -// - provider_name: provider-A -// - requires_all: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows -// missing token.) -// requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// -// # Example 8: A is optional and B is required. -// requires_all: -// requirements: -// - requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// - provider_name: provider-B -// -// [#next-free-field: 7] -message JwtRequirement { - oneof requires_type { - // Specify a required provider name. - string provider_name = 1; - - // Specify a required provider with audiences. - ProviderWithAudiences provider_and_audiences = 2; - - // Specify list of JwtRequirement. Their results are OR-ed. - // If any one of them passes, the result is passed. - JwtRequirementOrList requires_any = 3; - - // Specify list of JwtRequirement. Their results are AND-ed. - // All of them must pass, if one of them fails or missing, it fails. - JwtRequirementAndList requires_all = 4; - - // The requirement is always satisfied even if JWT is missing or the JWT - // verification fails. 
A typical usage is: this filter is used to only verify - // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWT tokens will be verified. - google.protobuf.Empty allow_missing_or_failed = 5; - - // The requirement is satisfied if JWT is missing, but failed if JWT is - // presented but invalid. Similar to allow_missing_or_failed, this is used - // to only verify JWTs and pass the verified payload to another filter. The - // different is this mode will reject requests with invalid tokens. - google.protobuf.Empty allow_missing = 6; - } -} - -// This message specifies a list of RequiredProvider. -// Their results are OR-ed; if any one of them passes, the result is passed -message JwtRequirementOrList { - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a list of RequiredProvider. -// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. -message JwtRequirementAndList { - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a Jwt requirement for a specific Route condition. -// Example 1: -// -// .. code-block:: yaml -// -// - match: -// prefix: /healthz -// -// In above example, "requires" field is empty for /healthz prefix match, -// it means that requests matching the path prefix don't require JWT authentication. -// -// Example 2: -// -// .. code-block:: yaml -// -// - match: -// prefix: / -// requires: { provider_name: provider-A } -// -// In above example, all requests matched the path prefix require jwt authentication -// from "provider-A". -message RequirementRule { - // The route matching parameter. Only when the match is satisfied, the "requires" field will - // apply. - // - // For example: following match will match all requests. - // - // .. 
code-block:: yaml - // - // match: - // prefix: / - // - api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. - JwtRequirement requires = 2; -} - -// This message specifies Jwt requirements based on stream_info.filterState. -// This FilterState should use `Router::StringAccessor` object to set a string value. -// Other HTTP filters can use it to specify Jwt requirements dynamically. -// -// Example: -// -// .. code-block:: yaml -// -// name: jwt_selector -// requires: -// issuer_1: -// provider_name: issuer1 -// issuer_2: -// provider_name: issuer2 -// -// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, -// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. -message FilterStateRule { - // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A map of string keys to requirements. The string key is the string value - // in the FilterState with the name specified in the *name* field above. - map requires = 3; -} - -// This is the Envoy HTTP filter config for JWT authentication. -// -// For example: -// -// .. code-block:: yaml -// -// providers: -// provider1: -// issuer: issuer1 -// audiences: -// - audience1 -// - audience2 -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// provider2: -// issuer: issuer2 -// local_jwks: -// inline_string: jwks_string -// -// rules: -// # Not jwt verification is required for /health path -// - match: -// prefix: /health -// -// # Jwt verification for provider1 is required for path prefixed with "prefix" -// - match: -// prefix: /prefix -// requires: -// provider_name: provider1 -// -// # Jwt verification for either provider1 or provider2 is required for all other requests. 
-// - match: -// prefix: / -// requires: -// requires_any: -// requirements: -// - provider_name: provider1 -// - provider_name: provider2 -// -message JwtAuthentication { - // Map of provider names to JwtProviders. - // - // .. code-block:: yaml - // - // providers: - // provider1: - // issuer: issuer1 - // audiences: - // - audience1 - // - audience2 - // remote_jwks: - // http_uri: - // uri: https://example.com/.well-known/jwks.json - // cluster: example_jwks_cluster - // provider2: - // issuer: provider2 - // local_jwks: - // inline_string: jwks_string - // - map providers = 1; - - // Specifies requirements based on the route matches. The first matched requirement will be - // applied. If there are overlapped match conditions, please put the most specific match first. - // - // Examples - // - // .. code-block:: yaml - // - // rules: - // - match: - // prefix: /healthz - // - match: - // prefix: /baz - // requires: - // provider_name: provider1 - // - match: - // prefix: /foo - // requires: - // requires_any: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - match: - // prefix: /bar - // requires: - // requires_all: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - repeated RequirementRule rules = 2; - - // This message specifies Jwt requirements based on stream_info.filterState. - // Other HTTP filters can use it to specify Jwt requirements dynamically. - // The *rules* field above is checked first, if it could not find any matches, - // check this one. - FilterStateRule filter_state_rules = 3; - - // When set to true, bypass the `CORS preflight request - // `_ regardless of JWT - // requirements specified in the rules. 
- bool bypass_cors_preflight = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto b/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto deleted file mode 100644 index 068b5e255df5d..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.lua.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2"; -option java_outer_classname = "LuaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.lua.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Lua] -// Lua :ref:`configuration overview `. -// [#extension: envoy.filters.http.lua] - -message Lua { - // The Lua code that Envoy will execute. This can be a very small script that - // further loads code from disk if desired. Note that if JSON configuration is used, the code must - // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line - // strings so complex scripts can be easily expressed inline in the configuration. 
- string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto b/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto deleted file mode 100644 index 4c5aadf442cf9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.on_demand.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.on_demand.v2"; -option java_outer_classname = "OnDemandProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.on_demand.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: OnDemand] -// IP tagging :ref:`configuration overview `. -// [#extension: envoy.filters.http.on_demand] - -message OnDemand { -} diff --git a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto deleted file mode 100644 index 8dfb4354d238f..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.original_src.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.original_src.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. - -// The Original Src filter binds upstream connections to the original source address determined -// for the request. This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. -// [#extension: envoy.filters.http.original_src] -message OriginalSrc { - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. 
- uint32 mark = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD deleted file mode 100644 index 5b66057a82cd7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto deleted file mode 100644 index b9361476bcfde..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.rate_limit.v2; - -import "envoy/config/ratelimit/v2/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.ratelimit] - -// [#next-free-field: 8] -message RateLimit { - // The rate limit domain to use when calling the rate limit service. 
- string domain = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The type of requests the filter should apply to. The supported - // types are *internal*, *external* or *both*. A request is considered internal if - // :ref:`x-envoy-internal` is set to true. If - // :ref:`x-envoy-internal` is not set or false, a - // request is considered external. The filter defaults to *both*, and it will apply to all request - // types. - string request_type = 3 - [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The - // HTTP code will be 200 for a gRPC response. - bool rate_limited_as_resource_exhausted = 6; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. 
- ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD deleted file mode 100644 index 90082d083a3f6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto deleted file mode 100644 index 87d76a8f913ea..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.rbac.v2; - -import "envoy/config/rbac/v2/rbac.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.rbac.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.http.rbac] - -// RBAC filter config. -message RBAC { - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - config.rbac.v2.RBAC rules = 1; - - // Shadow rules are not enforced by the filter (i.e., returning a 403) - // but will emit stats and logs and can be used for rule testing. 
- // If absent, no shadow RBAC policy will be applied. - config.rbac.v2.RBAC shadow_rules = 2; -} - -message RBACPerRoute { - reserved 1; - - // Override the global configuration of the filter with this new config. - // If absent, the global RBAC policy will be disabled for this route. - RBAC rbac = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/router/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/router/v2/BUILD deleted file mode 100644 index 4b7ccc42a6ca4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/router/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto b/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto deleted file mode 100644 index c95500cf8168b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.router.v2; - -import "envoy/config/filter/accesslog/v2/accesslog.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.router.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Router] -// Router :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.router] - -// [#next-free-field: 7] -message Router { - // Whether the router generates dynamic cluster statistics. Defaults to - // true. Can be disabled in high performance scenarios. - google.protobuf.BoolValue dynamic_stats = 1; - - // Whether to start a child span for egress routed calls. This can be - // useful in scenarios where other filters (auth, ratelimit, etc.) make - // outbound calls and have child spans rooted at the same ingress - // parent. Defaults to false. - bool start_child_span = 2; - - // Configuration for HTTP upstream logs emitted by the router. Upstream logs - // are configured in the same way as access logs, but each log entry represents - // an upstream request. Presuming retries are configured, multiple upstream - // requests may be made for each downstream (inbound) request. - repeated accesslog.v2.AccessLog upstream_log = 3; - - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers - // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. - bool suppress_envoy_headers = 4; - - // Specifies a list of HTTP headers to strictly validate. Envoy will reject a - // request and respond with HTTP status 400 if the request contains an invalid - // value for any of the headers listed in this field. Strict header checking - // is only supported for the following headers: - // - // Value must be a ','-delimited list (i.e. 
no spaces) of supported retry - // policy values: - // - // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` - // * :ref:`config_http_filters_router_x-envoy-retry-on` - // - // Value must be an integer: - // - // * :ref:`config_http_filters_router_x-envoy-max-retries` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated = { - items { - string { - in: "x-envoy-upstream-rq-timeout-ms" - in: "x-envoy-upstream-rq-per-try-timeout-ms" - in: "x-envoy-max-retries" - in: "x-envoy-retry-grpc-on" - in: "x-envoy-retry-on" - } - } - }]; - - // If not set, ingress Envoy will ignore - // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress - // Envoy, when deriving timeout for upstream cluster. - bool respect_expected_rq_timeout = 6; -} diff --git a/generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto b/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto deleted file mode 100644 index a7ae625d2ee37..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.squash.v2; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2"; -option java_outer_classname = "SquashProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.squash.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Squash] -// Squash :ref:`configuration overview `. -// [#extension: envoy.filters.http.squash] - -// [#next-free-field: 6] -message Squash { - // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When the filter requests the Squash server to create a DebugAttachment, it will use this - // structure as template for the body of the request. It can contain reference to environment - // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server - // with more information to find the process to attach the debugger to. For example, in a - // Istio/k8s environment, this will contain information on the pod: - // - // .. 
code-block:: json - // - // { - // "spec": { - // "attachment": { - // "pod": "{{ POD_NAME }}", - // "namespace": "{{ POD_NAMESPACE }}" - // }, - // "match_request": true - // } - // } - // - // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) - google.protobuf.Struct attachment_template = 2; - - // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. - google.protobuf.Duration request_timeout = 3; - - // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 - // seconds. - google.protobuf.Duration attachment_timeout = 4; - - // Amount of time to poll for the status of the attachment object in the Squash server - // (to check if has been attached). Defaults to 1 second. - google.protobuf.Duration attachment_poll_period = 5; -} diff --git a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD deleted file mode 100644 index cf02fc6c0b1f9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto deleted file mode 100644 index 3f984cec0d6c3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.tap.v2alpha; - -import "envoy/config/common/tap/v2alpha/common.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.tap.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap] -// Tap :ref:`configuration overview `. -// [#extension: envoy.filters.http.tap] - -// Top level configuration for the tap filter. -message Tap { - // Common configuration for the HTTP tap filter. - common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto b/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto deleted file mode 100644 index ac6d7eefa78a0..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ /dev/null @@ -1,159 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.transcoder.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2"; -option java_outer_classname = "TranscoderProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_json_transcoder.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC-JSON transcoder] -// gRPC-JSON transcoder :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_json_transcoder] - -// [#next-free-field: 10] -message GrpcJsonTranscoder { - message PrintOptions { - // Whether to add spaces, line breaks and indentation to make the JSON - // output easy to read. Defaults to false. - bool add_whitespace = 1; - - // Whether to always print primitive fields. By default primitive - // fields with default values will be omitted in JSON output. For - // example, an int32 field set to 0 will be omitted. Setting this flag to - // true will override the default behavior and print primitive fields - // regardless of their values. Defaults to false. - bool always_print_primitive_fields = 2; - - // Whether to always print enums as ints. By default they are rendered - // as strings. Defaults to false. 
- bool always_print_enums_as_ints = 3; - - // Whether to preserve proto field names. By default protobuf will - // generate JSON field names using the ``json_name`` option, or lower camel case, - // in that order. Setting this flag will preserve the original field names. Defaults to false. - bool preserve_proto_field_names = 4; - } - - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; - - // Control options for response JSON. These options are passed directly to - // `JsonPrintOptions `_. - PrintOptions print_options = 3; - - // Whether to keep the incoming request route after the outgoing headers have been transformed to - // the match the upstream gRPC service. Note: This means that routes for gRPC services that are - // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool match_incoming_request_route = 5; - - // A list of query parameters to be ignored for transcoding method mapping. - // By default, the transcoder filter will not transcode a request if there are any - // unknown/invalid query parameters. - // - // Example : - // - // .. 
code-block:: proto - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) { - // option (google.api.http) = { - // get: "/shelves/{shelf}" - // }; - // } - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable - // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow - // the same request to be mapped to ``GetShelf``. - repeated string ignored_query_parameters = 6; - - // Whether to route methods without the ``google.api.http`` option. - // - // Example : - // - // .. code-block:: proto - // - // package bookstore; - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) {} - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of - // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool auto_mapping = 7; - - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. - bool ignore_unknown_query_parameters = 8; - - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. - // - // For example, if an upstream server replies with headers: - // - // .. 
code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bool convert_grpc_status = 9; -} diff --git a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto b/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto deleted file mode 100644 index 0496207e09bcb..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.http_inspector.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.http_inspector.v2"; -option java_outer_classname = "HttpInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.http_inspector.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP Inspector Filter] -// Detect whether the application protocol is HTTP. -// [#extension: envoy.filters.listener.http_inspector] - -message HttpInspector { -} diff --git a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto b/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto deleted file mode 100644 index fa4acee45fc11..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.original_dst.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.original_dst.v2"; -option java_outer_classname = "OriginalDstProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.original_dst.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Original Dst Filter] -// Use the Original destination address on downstream connections. -// [#extension: envoy.filters.listener.original_dst] - -message OriginalDst { -} diff --git a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto deleted file mode 100644 index f9ddb98e745c7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.original_src.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.original_src.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. -// [#extension: envoy.filters.listener.original_src] - -// The Original Src filter binds upstream connections to the original source address determined -// for the connection. This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. -message OriginalSrc { - // Whether to bind the port to the one used in the original downstream connection. - // [#not-implemented-hide:] - bool bind_port = 1; - - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. 
- uint32 mark = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto b/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto deleted file mode 100644 index cabffb9fc0c05..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.proxy_protocol.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.proxy_protocol.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Proxy Protocol Filter] -// PROXY protocol listener filter. 
-// [#extension: envoy.filters.listener.proxy_protocol] - -message ProxyProtocol { -} diff --git a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto b/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto deleted file mode 100644 index 7ab679c47dc57..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.tls_inspector.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2"; -option java_outer_classname = "TlsInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.tls_inspector.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: TLS Inspector Filter] -// Allows detecting whether the transport appears to be TLS or plaintext. 
-// [#extension: envoy.filters.listener.tls_inspector] - -message TlsInspector { -} diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto deleted file mode 100644 index 4da6d97ca2992..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.client_ssl_auth.v2; - -import "envoy/api/v2/core/address.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; -option java_outer_classname = "ClientSslAuthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.client_ssl_auth.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Client TLS authentication] -// Client TLS authentication -// :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.client_ssl_auth] - -message ClientSSLAuth { - // The :ref:`cluster manager ` cluster that runs - // the authentication service. The filter will connect to the service every 60s to fetch the list - // of principals. The service must support the expected :ref:`REST API - // `. - string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Time in milliseconds between principal refreshes from the - // authentication service. Default is 60000 (60s). The actual fetch time - // will be this value plus a random jittered value between - // 0-refresh_delay_ms milliseconds. - google.protobuf.Duration refresh_delay = 3; - - // An optional list of IP address and subnet masks that should be white - // listed for access by the filter. If no list is provided, there is no - // IP allowlist. - repeated api.v2.core.CidrRange ip_white_list = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto b/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto deleted file mode 100644 index 15de7e3b55379..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.direct_response.v2; - -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.direct_response.v2"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.direct_response.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Direct response] -// Direct response :ref:`configuration overview `. -// [#extension: envoy.filters.network.direct_response] - -message Config { - // Response data as a data source. - api.v2.core.DataSource response = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD deleted file mode 100644 index 5fe475a5dcf8d..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md deleted file mode 100644 index c83caca1f8f4d..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md +++ /dev/null @@ -1 +0,0 @@ -Protocol buffer definitions for the Dubbo proxy. diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto deleted file mode 100644 index 47248932f94ce..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.dubbo_proxy.v2alpha1; - -import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; -option java_outer_classname = "DubboProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.dubbo_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.dubbo_proxy] - -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - // the default protocol. 
- Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - -// [#next-free-field: 6] -message DubboProxy { - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; - - // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; - - // The route table for the connection manager is static and is specified in this property. - repeated RouteConfiguration route_config = 4; - - // A list of individual Dubbo filters that make up the filter chain for requests made to the - // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no dubbo_filters are specified, a default Dubbo router filter - // (`envoy.filters.dubbo.router`) is used. - repeated DubboFilter dubbo_filters = 5; -} - -// DubboFilter configures a Dubbo filter. -message DubboFilter { - // The name of the filter to instantiate. The name must match a supported - // filter. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any config = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto deleted file mode 100644 index 9af461e3577cb..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.dubbo_proxy.v2alpha1; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/range.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.dubbo_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dubbo Proxy Route Configuration] -// Dubbo Proxy :ref:`configuration overview `. - -// [#next-free-field: 6] -message RouteConfiguration { - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The interface name of the service. - string interface = 2; - - // Which group does the interface belong to. - string group = 3; - - // The version number of the interface. - string version = 4; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 5; -} - -message Route { - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. 
- RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated api.v2.route.HeaderMatcher headers = 2; -} - -message RouteAction { - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // Currently ClusterWeight only supports the name and weight fields. - api.v2.route.WeightedCluster weighted_clusters = 2; - } -} - -message MethodMatch { - // The parameter matching type. - message ParameterMatchSpecifier { - oneof parameter_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 3; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting - // of an optional plus or minus sign followed by a sequence of digits. The rule will not match - // if the header value does not represent an integer. Match will fail for empty values, - // floating point numbers or if only a subsequence of the header value is an integer. 
- // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, "-1somestring" - type.Int64Range range_match = 4; - } - } - - // The name of the method. - type.matcher.StringMatcher name = 1; - - // Method parameter definition. - // The key is the parameter index, starting from 0. - // The value is the parameter matching type. - map params_match = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto b/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto deleted file mode 100644 index 2b51ce4e18c36..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.echo.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.echo.v2"; -option java_outer_classname = "EchoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.echo.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Echo] -// Echo :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.echo] - -message Echo { -} diff --git a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto deleted file mode 100644 index 40cea7061868c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.ext_authz.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.ext_authz.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Network External Authorization ] -// The network layer external authorization service configuration -// :ref:`configuration overview `. -// [#extension: envoy.filters.network.ext_authz] - -// External Authorization filter calls out to an external service over the -// gRPC Authorization API defined by -// :ref:`CheckRequest `. 
-// A failed check will cause this filter to close the TCP connection. -message ExtAuthz { - // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The external authorization gRPC service configuration. - // The default timeout is set to 200ms by this filter. - api.v2.core.GrpcService grpc_service = 2; - - // The filter's behaviour in case the external authorization service does - // not respond back. When it is set to true, Envoy will also allow traffic in case of - // communication failure between authorization service and the proxy. - // Defaults to false. - bool failure_mode_allow = 3; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD deleted file mode 100644 index b03bcd437c3df..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/type:pkg", - "//envoy/type/tracing/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto deleted file mode 100644 index 3e7a4dc17769c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ /dev/null @@ -1,679 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.http_connection_manager.v2; - -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/route.proto"; -import "envoy/api/v2/scoped_route.proto"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; -import "envoy/config/trace/v2/http_tracer.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/tracing/v2/custom_tag.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.http_connection_manager.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP connection manager] -// HTTP connection manager :ref:`configuration overview `. -// [#extension: envoy.filters.network.http_connection_manager] - -// [#next-free-field: 37] -message HttpConnectionManager { - enum CodecType { - // For every new connection, the connection manager will determine which - // codec to use. This mode supports both ALPN for TLS listeners as well as - // protocol inference for plaintext listeners. If ALPN data is available, it - // is preferred, otherwise protocol inference is used. In almost all cases, - // this is the right option to choose for this setting. - AUTO = 0; - - // The connection manager will assume that the client is speaking HTTP/1.1. - HTTP1 = 1; - - // The connection manager will assume that the client is speaking HTTP/2 - // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - // Prior knowledge is allowed). - HTTP2 = 2; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 3; - } - - enum ServerHeaderTransformation { - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. 
- FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - } - - // [#next-free-field: 10] - message Tracing { - enum OperationName { - // The HTTP listener is used for ingress/incoming requests. - INGRESS = 0; - - // The HTTP listener is used for egress/outgoing requests. - EGRESS = 1; - } - - // The span name will be derived from this field. If - // :ref:`traffic_direction ` is - // specified on the parent listener, then it is used instead of this field. - // - // .. attention:: - // This field has been deprecated in favor of `traffic_direction`. - OperationName operation_name = 1 [ - deprecated = true, - (validate.rules).enum = {defined_only: true}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // A list of header names used to create tags for the active span. The header name is used to - // populate the tag name, and the header value is used to populate the tag value. The tag is - // created if the specified header name is present in the request's headers. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`custom_tags - // `. - repeated string request_headers_for_tags = 2 [deprecated = true]; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. 
- // Default: 100% - type.Percent client_sampling = 3; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.Percent random_sampling = 4; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.Percent overall_sampling = 5; - - // Whether to annotate spans with additional data. If true, spans will include logs for stream - // events. - bool verbose = 6; - - // Maximum length of the request path to extract and include in the HttpUrl tag. Used to - // truncate lengthy request paths to meet the needs of a tracing backend. - // Default: 256 - google.protobuf.UInt32Value max_path_tag_length = 7; - - // A list of custom tags with unique tag name to create tags for the active span. - repeated type.tracing.v2.CustomTag custom_tags = 8; - - // Configuration for an external tracing provider. - // If not specified, no tracing will be performed. - // - // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once - // in Envoy lifetime. - // Any attempts to reconfigure it or to use different configurations for different HCM filters - // will be rejected. 
- // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - // on OpenCensus side. - trace.v2.Tracing.Http provider = 9; - } - - message InternalAddressConfig { - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - - // [#next-free-field: 7] - message SetCurrentClientCertDetails { - reserved 2; - - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - } - - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. 
- string upgrade_type = 1; - - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - } - - reserved 27; - - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; - - // The route table for the connection manager is static and is specified in this property. - api.v2.RouteConfiguration route_config = 4; - - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } - - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. :ref:`Order matters ` - // as the filters are processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; - - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. 
See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; - - // Presence of the object defines whether the connection manager - // emits :ref:`tracing ` data to the :ref:`configured tracing provider - // `. - Tracing tracing = 7; - - // Additional settings for HTTP requests handled by the connection manager. These will be - // applicable to both HTTP1 and HTTP2 requests. - api.v2.core.HttpProtocolOptions common_http_protocol_options = 35; - - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - api.v2.core.Http1ProtocolOptions http_protocol_options = 8; - - // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; - - // An optional override that the connection manager will write to the server - // header in responses. If not set, the default is *envoy*. - string server_name = 10; - - // Defines the action to be applied to the Server header on the response path. - // By default, Envoy will overwrite the header with the value specified in - // server_name. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; - - // The maximum request headers size for incoming connections. - // If unconfigured, the default max request headers allowed is 60 KiB. - // Requests that exceed this limit will receive a 431 response. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - - // The idle timeout for connections managed by the connection manager. The - // idle timeout is defined as the period in which there are no active - // requests. If not set, there is no idle timeout. When the idle timeout is - // reached the connection will be closed. If the connection is an HTTP/2 - // connection a drain sequence will occur prior to closing the connection. - // This field is deprecated. 
Use :ref:`idle_timeout - // ` - // instead. - google.protobuf.Duration idle_timeout = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // The stream idle timeout for connections managed by the connection manager. - // If not specified, this defaults to 5 minutes. The default value was selected - // so as not to interfere with any smaller configured timeouts that may have - // existed in configurations prior to the introduction of this feature, while - // introducing robustness to TCP connections that terminate without a FIN. - // - // This idle timeout applies to new streams and is overridable by the - // :ref:`route-level idle_timeout - // `. Even on a stream in - // which the override applies, prior to receipt of the initial request - // headers, the :ref:`stream_idle_timeout - // ` - // applies. Each time an encode/decode event for headers or data is processed - // for the stream, the timer will be reset. If the timeout fires, the stream - // is terminated with a 408 Request Timeout error code if no upstream response - // header has been received, otherwise a stream reset occurs. - // - // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough - // window to write any remaining stream data once the entirety of stream data (local end stream is - // true) has been buffered pending available window. In other words, this timeout defends against - // a peer that does not release enough window to completely write the stream, even though all - // data has been proxied within available flow control windows. If the timeout is hit in this - // case, the :ref:`tx_flush_timeout ` counter will be - // incremented. Note that :ref:`max_stream_duration - // ` does not apply to this corner - // case. - // - // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - // to the granularity of events presented to the connection manager. 
For example, while receiving - // very large request headers, it may be the case that there is traffic regularly arriving on the - // wire while the connection manage is only able to observe the end-of-headers event, hence the - // stream may still idle timeout. - // - // A value of 0 will completely disable the connection manager stream idle - // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; - - // The amount of time that Envoy will wait for the entire request to be received. - // The timer is activated when the request is initiated, and is disarmed when the last byte of the - // request is sent upstream (i.e. all decoding filters have processed the request), OR when the - // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; - - // The time that Envoy will wait between sending an HTTP/2 “shutdown - // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - // This is used so that Envoy provides a grace period for new streams that - // race with the final GOAWAY frame. During this grace period, Envoy will - // continue to accept new streams. After the grace period, a final GOAWAY - // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - google.protobuf.Duration drain_timeout = 12; - - // The delayed close timeout is for downstream connections managed by the HTTP connection manager. - // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - // from the downstream connection) prior to Envoy closing the socket associated with that - // connection. 
- // NOTE: This timeout is enforced even when the socket associated with the downstream connection - // is pending a flush of the write buffer. However, any progress made writing data to the socket - // will restart the timer associated with this timeout. This means that the total grace period for - // a socket in this state will be - // +. - // - // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - // sequence mitigates a race condition that exists when downstream clients do not drain/process - // data in a connection's receive buffer after a remote close has been detected via a socket - // write(). This race leads to such clients failing to process the response code sent by Envoy, - // which could result in erroneous downstream processing. - // - // If the timeout triggers, Envoy will close the connection's socket. - // - // The default timeout is 1000 ms if this option is not specified. - // - // .. NOTE:: - // To be useful in avoiding the race condition described above, this timeout must be set - // to *at least* +<100ms to account for - // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - // - // .. WARNING:: - // A value of 0 will completely disable delayed close processing. When disabled, the downstream - // connection's socket will be closed immediately after the write flush is completed or will - // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; - - // Configuration for :ref:`HTTP access logs ` - // emitted by the connection manager. - repeated accesslog.v2.AccessLog access_log = 13; - - // If set to true, the connection manager will use the real remote address - // of the client connection when determining internal versus external origin and manipulating - // various headers. If set to false or absent, the connection manager will use the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. 
See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for`, - // :ref:`config_http_conn_man_headers_x-envoy-internal`, and - // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; - - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - uint32 xff_num_trusted_hops = 19; - - // Configures what network addresses are considered internal for stats and header sanitation - // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information about internal/external addresses. - InternalAddressConfig internal_address_config = 25; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - // has mutated the request headers. While :ref:`use_remote_address - // ` - // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used - // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; - - // Via header value to append to request and response headers. If this is - // empty, no via header will be appended. - string via = 22; - - // Whether the connection manager will generate the :ref:`x-request-id - // ` header if it does not exist. This defaults to - // true. 
Generating a random UUID4 is expensive so in high throughput scenarios where this feature - // is not desired it can be disabled. - google.protobuf.BoolValue generate_request_id = 15; - - // Whether the connection manager will keep the :ref:`x-request-id - // ` header if passed for a request that is edge - // (Edge request is the request from external clients to front Envoy) and not reset it, which - // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; - - // This field is valid only when :ref:`forward_client_cert_details - // ` - // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name - // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; - - // If proxy_100_continue is true, Envoy will proxy incoming "Expect: - // 100-continue" headers upstream, and forward "100 Continue" responses - // downstream. If this is false or not set, Envoy will instead strip the - // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; - - // If - // :ref:`use_remote_address - // ` - // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - // This is useful for testing compatibility of upstream services that parse the header value. For - // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. 
See `IPv4-Mapped IPv6 Addresses - // `_ for details. This will also affect the - // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - // ` for runtime - // control. - // [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - - repeated UpgradeConfig upgrade_configs = 23; - - // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header - // as well. For paths that fail this check, Envoy will respond with 400 to - // paths that are malformed. This defaults to false currently but will default - // true in the future. When not specified, this value may be overridden by the - // runtime variable - // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison `_ - // for details of normalization. - // Note that Envoy does not perform - // `case normalization `_ - google.protobuf.BoolValue normalize_path = 30; - - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec `_ and is provided for convenience. - bool merge_slashes = 33; - - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. - // - // If not set, Envoy uses the default UUID-based behavior: - // - // 1. Request ID is propagated using *x-request-id* header. - // - // 2. Request ID is a universally unique identifier (UUID). - // - // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. 
- RequestIDExtension request_id_extension = 36; -} - -message Rds { - // Configuration source specifier for RDS. - api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; -} - -// This message is used to work around the limitations with 'oneof' and repeated fields. -message ScopedRouteConfigurationsList { - repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated = {min_items: 1}]; -} - -// [#next-free-field: 6] -message ScopedRoutes { - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - // keys are matched against a set of :ref:`Key` - // objects assembled from :ref:`ScopedRouteConfiguration` - // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - // :ref:`scoped_route_configurations_list`. - // - // Upon receiving a request's headers, the Router will build a key using the algorithm specified - // by this message. This key will be used to look up the routing table (i.e., the - // :ref:`RouteConfiguration`) to use for the request. - message ScopeKeyBuilder { - // Specifies the mechanism for constructing key fragments which are composed into scope keys. - message FragmentBuilder { - // Specifies how the value of a header should be extracted. - // The following example maps the structure of a header to the fields in this message. - // - // .. code:: - // - // <0> <1> <-- index - // X-Header: a=b;c=d - // | || | - // | || \----> - // | || - // | |\----> - // | | - // | \----> - // | - // \----> - // - // Each 'a=b' key-value pair constitutes an 'element' of the header field. 
- message HeaderValueExtractor { - // Specifies a header field's key value pair to match on. - message KvElement { - // The separator between key and value (e.g., '=' separates 'k=v;...'). - // If an element is an empty string, the element is ignored. - // If an element contains no separator, the whole element is parsed as key and the - // fragment value is an empty string. - // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The key to match on. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The name of the header field to extract the value from. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - - oneof extract_type { - // Specifies the zero based index of the element to extract. - // Note Envoy concatenates multiple values of the same header key into a comma separated - // string, the splitting always happens after the concatenation. - uint32 index = 3; - - // Specifies the key value pair to extract the value from. - KvElement element = 4; - } - } - - oneof type { - option (validate.required) = true; - - // Specifies how a header field's value should be extracted. - HeaderValueExtractor header_value_extractor = 1; - } - } - - // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - // fragments of a :ref:`ScopedRouteConfiguration`. - // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. 
- repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; - - // Configuration source specifier for RDS. - // This config source is used to subscribe to RouteConfiguration resources specified in - // ScopedRouteConfiguration messages. - api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; - - oneof config_specifier { - option (validate.required) = true; - - // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by - // matching a key constructed from the request's attributes according to the algorithm specified - // by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRouteConfigurationsList scoped_route_configurations_list = 4; - - // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS - // API. A scope is assigned to a request by matching a key constructed from the request's - // attributes according to the algorithm specified by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRds scoped_rds = 5; - } -} - -message ScopedRds { - // Configuration source specifier for scoped RDS. - api.v2.core.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message = {required: true}]; -} - -message HttpFilter { - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. 
- oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 4; - } -} - -message RequestIDExtension { - // Request ID extension specific configuration. - google.protobuf.Any typed_config = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto b/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto deleted file mode 100644 index ea2f60e71eed3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.kafka_broker.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1"; -option java_outer_classname = "KafkaBrokerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.kafka_broker.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Kafka Broker] -// Kafka Broker :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.kafka_broker] - -message KafkaBroker { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD deleted file mode 100644 index 2ffbc958786b3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto b/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto deleted file mode 100644 index 791b767f3e6aa..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.local_rate_limit.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/token_bucket.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha"; -option java_outer_classname = "LocalRateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.local_ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Local rate limit] -// Local 
rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.local_ratelimit] - -message LocalRateLimit { - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The token bucket configuration to use for rate limiting connections that are processed by the - // filter's filter chain. Each incoming connection processed by the filter consumes a single - // token. If the token is available, the connection will be allowed. If no tokens are available, - // the connection will be immediately closed. - // - // .. note:: - // In the current implementation each filter and filter chain has an independent rate limit. - // - // .. note:: - // In the current implementation the token bucket's :ref:`fill_interval - // ` must be >= 50ms to avoid too aggressive - // refills. - type.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - api.v2.core.RuntimeFeatureFlag runtime_enabled = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD deleted file mode 100644 index b4f275ad5f870..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/fault/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto deleted file mode 100644 index b261897858e21..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.mongo_proxy.v2; - -import "envoy/config/filter/fault/v2/fault.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2"; -option java_outer_classname = "MongoProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.mongo_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Mongo proxy] -// MongoDB :ref:`configuration overview `. -// [#extension: envoy.filters.network.mongo_proxy] - -message MongoProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The optional path to use for writing Mongo access logs. If not access log - // path is specified no access logs will be written. Note that access log is - // also gated :ref:`runtime `. - string access_log = 2; - - // Inject a fixed delay before proxying a Mongo operation. Delays are - // applied to the following MongoDB operations: Query, Insert, GetMore, - // and KillCursors. 
Once an active delay is in progress, all incoming - // data up until the timer event fires will be a part of the delay. - fault.v2.FaultDelay delay = 3; - - // Flag to specify whether :ref:`dynamic metadata - // ` should be emitted. Defaults to false. - bool emit_dynamic_metadata = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto deleted file mode 100644 index 78c6b7e971df4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.mysql_proxy.v1alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1"; -option java_outer_classname = "MysqlProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.mysql_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: MySQL proxy] -// MySQL Proxy :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.mysql_proxy] - -message MySQLProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. - // If the access log field is empty, access logs will not be written. - string access_log = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD deleted file mode 100644 index 6d29e84c421c9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/ratelimit:pkg", - "//envoy/config/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto deleted file mode 100644 index aed56c9af6292..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.rate_limit.v2; - -import "envoy/api/v2/ratelimit/ratelimit.proto"; -import "envoy/config/ratelimit/v2/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - 
"envoy.extensions.filters.network.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.ratelimit] - -// [#next-free-field: 7] -message RateLimit { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The rate limit descriptor list to use in the rate limit service request. - repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated = {min_items: 1}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD deleted file mode 100644 index 90082d083a3f6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto deleted file mode 100644 index ce86794c71cc3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.rbac.v2; - -import "envoy/config/rbac/v2/rbac.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.rbac.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.network.rbac] - -// RBAC network filter config. -// -// Header should not be used in rules/shadow_rules in RBAC network filter as -// this information is only available in :ref:`RBAC http filter `. -message RBAC { - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. - CONTINUOUS = 1; - } - - // Specify the RBAC rules to be applied globally. 
- // If absent, no enforcing RBAC policy will be applied. - config.rbac.v2.RBAC rules = 1; - - // Shadow rules are not enforced by the filter but will emit stats and logs - // and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v2.RBAC shadow_rules = 2; - - // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; - - // RBAC enforcement strategy. By default RBAC will be enforced only once - // when the first byte of data arrives from the downstream. When used in - // conjunction with filters that emit dynamic metadata after decoding - // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to - // CONTINUOUS to enforce RBAC policies on every message boundary. - EnforcementType enforcement_type = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD deleted file mode 100644 index f91701518907a..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto deleted file mode 100644 index 948d7c349ff00..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ /dev/null @@ -1,245 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.redis_proxy.v2; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2"; -option java_outer_classname = "RedisProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.redis_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Redis Proxy] -// Redis Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.redis_proxy] - -// [#next-free-field: 7] -message RedisProxy { - // Redis connection pool settings. - // [#next-free-field: 9] - message ConnPoolSettings { - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. 
- enum ReadPolicy { - // Default mode. Read from the current primary node. - MASTER = 0; - - // Read from the primary, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; - - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the primary. - PREFER_REPLICA = 3; - - // Read from any node of the cluster. A random node is selected among the primary and - // replicas, healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } - - // Per-operation timeout in milliseconds. The timer starts when the first - // command of a pipeline is written to the backend connection. Each response received from Redis - // resets the timer since it signifies that the next command is being processed by the backend. - // The only exception to this behavior is when a connection to a backend is not yet established. - // In that case, the connect timeout on the cluster will govern the timeout until the connection - // is ready. - google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; - - // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be - // forwarded to the same upstream. The hash key used for determining the upstream in a - // consistent hash ring configuration will be computed from the hash tagged key instead of the - // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster - // implementation `_. 
- // - // Examples: - // - // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream - // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream - bool enable_hashtagging = 2; - - // Accept `moved and ask redirection - // `_ errors from upstream - // redis servers, and retry commands to the specified target server. The target server does not - // need to be known to the cluster manager. If the command cannot be redirected, then the - // original error is passed downstream unchanged. By default, this support is not enabled. - bool enable_redirection = 3; - - // Maximum size of encoded request buffer before flush is triggered and encoded requests - // are sent upstream. If this is unset, the buffer flushes whenever it receives data - // and performs no batching. - // This feature makes it possible for multiple clients to send requests to Envoy and have - // them batched- for example if one is running several worker processes, each with its own - // Redis connection. There is no benefit to using this with a single downstream process. - // Recommended size (if enabled) is 1024 bytes. - uint32 max_buffer_size_before_flush = 4; - - // The encoded request buffer is flushed N milliseconds after the first request has been - // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. - // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, - // the timer should be set according to the number of clients, overall request rate and - // desired maximum latency for a single command. For example, if there are many requests - // being batched together at a high rate, the buffer will likely be filled before the timer - // fires. Alternatively, if the request rate is lower the buffer will not be filled as often - // before the timer fires. - // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter - // defaults to 3ms. 
- google.protobuf.Duration buffer_flush_timeout = 5; - - // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts - // can be created at any given time by any given worker thread (see `enable_redirection` for - // more details). If the host is unknown and a connection cannot be created due to enforcing - // this limit, then redirection will fail and the original redirection error will be passed - // downstream unchanged. This limit defaults to 100. - google.protobuf.UInt32Value max_upstream_unknown_connections = 6; - - // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. - bool enable_command_stats = 8; - - // Read policy. The default is to read from the primary. - ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; - } - - message PrefixRoutes { - message Route { - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If not specified or the runtime key is not present, all requests to the target cluster - // will be mirrored. - // - // If specified, Envoy will lookup the runtime key to get the percentage of requests to the - // mirror. - api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; - - // Set this to TRUE to only mirror write commands, this is effectively replicating the - // writes in a "fire and forget" manner. - bool exclude_read_commands = 3; - } - - // String prefix that must match the beginning of the keys. 
Envoy will always favor the - // longest match. - string prefix = 1; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; - - // Indicates that the route has a request mirroring policy. - repeated RequestMirrorPolicy request_mirror_policy = 4; - } - - // List of prefix routes. - repeated Route routes = 1; - - // Indicates that prefix matching should be case insensitive. - bool case_insensitive = 2; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string catch_all_cluster = 3 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. - Route catch_all_route = 4; - } - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Name of cluster from cluster manager. See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. 
- bool latency_in_micros = 4; - - // List of **unique** prefixes used to separate keys from different workloads to different - // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all - // cluster can be used to forward commands when there is no match. Time complexity of the - // lookups are in O(min(longest key prefix, key length)). - // - // Example: - // - // .. code-block:: yaml - // - // prefix_routes: - // routes: - // - prefix: "ab" - // cluster: "cluster_a" - // - prefix: "abc" - // cluster: "cluster_b" - // - // When using the above routes, the following prefixes would be sent to: - // - // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b. - // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a. - // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all - // route` - // would have retrieved the key from that cluster instead. - // - // See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing clusters. - PrefixRoutes prefix_routes = 5; - - // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis - // AUTH command `_ with this password before enabling any other - // command. If an AUTH command's password matches this password, an "OK" response will be returned - // to the client. If the AUTH command password does not match this password, then an "ERR invalid - // password" error will be returned. If any other command is received before AUTH when this - // password is set, then a "NOAUTH Authentication required." error response will be sent to the - // client. If an AUTH command is received when the password is not set, then an "ERR Client sent - // AUTH, but no password is set" error will be returned. 
- api.v2.core.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; -} - -// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.redis_proxy`. -message RedisProtocolOptions { - // Upstream server password as defined by the `requirepass` directive - // `_ in the server's configuration file. - api.v2.core.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto b/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto deleted file mode 100644 index 71c161fc48f69..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.sni_cluster.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.sni_cluster.v2"; -option java_outer_classname = "SniClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.sni_cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// 
[#protodoc-title: SNI Cluster Filter] -// Set the upstream cluster name from the SNI field in the TLS connection. -// [#extension: envoy.filters.network.sni_cluster] - -message SniCluster { -} diff --git a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD deleted file mode 100644 index c02167a174de9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto deleted file mode 100644 index 4ec68f320eed7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ /dev/null @@ -1,184 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.tcp_proxy.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; -import "envoy/type/hash_policy.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; -option java_outer_classname = "TcpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.tcp_proxy.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: TCP Proxy] -// TCP Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.tcp_proxy] - -// [#next-free-field: 13] -message TcpProxy { - // [#not-implemented-hide:] Deprecated. - // TCP Proxy filter configuration using V1 format. - message DeprecatedV1 { - option deprecated = true; - - // A TCP proxy route consists of a set of optional L4 criteria and the - // name of a cluster. If a downstream connection matches all the - // specified criteria, the cluster in the route is used for the - // corresponding upstream connection. Routes are tried in the order - // specified until a match is found. If no match is found, the connection - // is closed. A route with no criteria is valid and always produces a - // match. - // [#next-free-field: 6] - message TCPRoute { - // The cluster to connect to when a the downstream network connection - // matches the specified criteria. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An optional list of IP address subnets in the form - // “ip_address/xx”. The criteria is satisfied if the destination IP - // address of the downstream connection is contained in at least one of - // the specified subnets. If the parameter is not specified or the list - // is empty, the destination IP address is ignored. The destination IP - // address of the downstream connection might be different from the - // addresses on which the proxy is listening if the connection has been - // redirected. - repeated api.v2.core.CidrRange destination_ip_list = 2; - - // An optional string containing a comma-separated list of port numbers - // or ranges. The criteria is satisfied if the destination port of the - // downstream connection is contained in at least one of the specified - // ranges. If the parameter is not specified, the destination port is - // ignored. 
The destination port address of the downstream connection - // might be different from the port on which the proxy is listening if - // the connection has been redirected. - string destination_ports = 3; - - // An optional list of IP address subnets in the form - // “ip_address/xx”. The criteria is satisfied if the source IP address - // of the downstream connection is contained in at least one of the - // specified subnets. If the parameter is not specified or the list is - // empty, the source IP address is ignored. - repeated api.v2.core.CidrRange source_ip_list = 4; - - // An optional string containing a comma-separated list of port numbers - // or ranges. The criteria is satisfied if the source port of the - // downstream connection is contained in at least one of the specified - // ranges. If the parameter is not specified, the source port is - // ignored. - string source_ports = 5; - } - - // The route table for the filter. All filter instances must have a route - // table, even if it is empty. - repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Allows for specification of multiple upstream clusters along with weights - // that indicate the percentage of traffic to be forwarded to each cluster. - // The router selects an upstream cluster based on these weights. - message WeightedCluster { - message ClusterWeight { - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When a request matches the route, the choice of an upstream cluster is - // determined by its weight. The sum of weights across all entries in the - // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. 
Note that this will be merged with what's provided in - // :ref:`TcpProxy.metadata_match - // `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - api.v2.core.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Configuration for tunneling TCP over other transports or application layers. - // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will - // remain the default. - message TunnelingConfig { - // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - api.v2.core.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set - // to 0s, the timeout will be disabled. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. 
- google.protobuf.Duration idle_timeout = 8; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated accesslog.v2.AccessLog access_log = 5; - - // [#not-implemented-hide:] Deprecated. - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated type.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - - // [#not-implemented-hide:] feature in progress - // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP - // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload - // will be proxied upstream as per usual. 
- TunnelingConfig tunneling_config = 12; -} diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD deleted file mode 100644 index 1e485f4e158ab..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md deleted file mode 100644 index a7d95c0d47640..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md +++ /dev/null @@ -1 +0,0 @@ -Protocol buffer definitions for the Thrift proxy. 
diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto deleted file mode 100644 index 8230a52e341e7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ /dev/null @@ -1,141 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.thrift_proxy.v2alpha1; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.thrift_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Thrift Proxy Route Configuration] -// Thrift Proxy :ref:`configuration overview `. - -message RouteConfiguration { - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 2; -} - -message Route { - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - oneof match_specifier { - option (validate.required) = true; - - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. 
- string method_name = 1; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - string service_name = 2; - } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. - repeated api.v2.route.HeaderMatcher headers = 4; -} - -// [#next-free-field: 7] -message RouteAction { - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates a single upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 2; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. 
- string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; - } - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - api.v2.core.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated api.v2.route.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. - bool strip_service_name = 5; -} - -// Allows for specification of multiple upstream clusters along with weights that indicate the -// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster -// based on these weights. -message WeightedCluster { - message ClusterWeight { - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When a request matches the route, the choice of an upstream cluster is determined by its - // weight. The sum of weights across all entries in the clusters array determines the total - // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field, combined with what's - // provided in :ref:`RouteAction's metadata_match - // `, - // will be considered. Values here will take precedence. 
Keys and values should be provided - // under the "envoy.lb" metadata key. - api.v2.core.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto deleted file mode 100644 index 96e750ef310d7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ /dev/null @@ -1,121 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.thrift_proxy.v2alpha1; - -import "envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; -option java_outer_classname = "ThriftProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.thrift_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Thrift Proxy] -// Thrift Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.thrift_proxy] - -// Thrift transport types supported by Envoy. -enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. - // For upstream connections, the Thrift proxy will use same transport as the downstream - // connection. - AUTO_TRANSPORT = 0; - - // The Thrift proxy will use the Thrift framed transport. - FRAMED = 1; - - // The Thrift proxy will use the Thrift unframed transport. 
- UNFRAMED = 2; - - // The Thrift proxy will assume the client is using the Thrift header transport. - HEADER = 3; -} - -// Thrift Protocol types supported by Envoy. -enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. - // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol - // detection. For upstream connections, the Thrift proxy will use the same protocol as the - // downstream connection. - AUTO_PROTOCOL = 0; - - // The Thrift proxy will use the Thrift binary protocol. - BINARY = 1; - - // The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2; - - // The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3; - - // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. - TWITTER = 4; -} - -// [#next-free-field: 6] -message ThriftProxy { - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use. Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - repeated ThriftFilter thrift_filters = 5; -} - -// ThriftFilter configures a Thrift filter. 
-message ThriftFilter { - // The name of the filter to instantiate. The name must match a supported - // filter. The built-in filters are: - // - // [#comment:TODO(zuercher): Auto generate the following list] - // * :ref:`envoy.filters.thrift.router ` - // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in -// in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.thrift_proxy`. -message ThriftProtocolOptions { - // Supplies the type of transport that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_TRANSPORT`, - // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_PROTOCOL`, - // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto deleted file mode 100644 index cae622cecc34e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.zookeeper_proxy.v1alpha1; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; -option java_outer_classname = "ZookeeperProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.zookeeper_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: ZooKeeper proxy] -// ZooKeeper Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.zookeeper_proxy] - -message ZooKeeperProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. - // If the access log field is empty, access logs will not be written. - string access_log = 2; - - // Messages — requests, responses and events — that are bigger than this value will - // be ignored. If it is not set, the default value is 1Mb. 
- // - // The value here should match the jute.maxbuffer property in your cluster configuration: - // - // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options - // - // if that is set. If it isn't, ZooKeeper's default is also 1Mb. - google.protobuf.UInt32Value max_packet_bytes = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD deleted file mode 100644 index 5b66057a82cd7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto deleted file mode 100644 index 389ddf35990ed..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.thrift.rate_limit.v2alpha1; - -import "envoy/config/ratelimit/v2/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// 
[#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.ratelimit] - -// [#next-free-field: 6] -message RateLimit { - // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specifies the rate limit configuration stage. Each configured rate limit filter performs a - // rate limit check using descriptors configured in the - // :ref:`envoy_api_msg_config.filter.network.thrift_proxy.v2alpha1.RouteAction` for the request. - // Only those entries with a matching stage number are used for a given filter. If not set, the - // default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 3; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 4; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto deleted file mode 100644 index 06dc150d5c70b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.udp.udp_proxy.v2alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; -option java_outer_classname = "UdpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.udp.udp_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: UDP proxy] -// UDP proxy :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.udp_proxy] - -// Configuration for the UDP proxy filter. -message UdpProxyConfig { - // The stat prefix used when emitting UDP proxy filter stats. 
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by - // the session. The default if not specified is 1 minute. - google.protobuf.Duration idle_timeout = 3; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto deleted file mode 100644 index b63d35af4018b..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v2alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; -option java_outer_classname = "AwsIamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.aws_iam] - -message AwsIamConfig { - // The `service namespace - // `_ - // of the Grpc 
endpoint. - // - // Example: appmesh - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The `region `_ hosting the Grpc - // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment - // variable. - // - // Example: us-west-2 - string region = 2; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto deleted file mode 100644 index 41e67f0bf24b3..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; -option java_outer_classname = "FileBasedMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.file_based_metadata] - -message FileBasedMetadataConfig { - // Location or inline data of secret to use for authentication of the Google gRPC connection - // this secret will be attached to a header of the gRPC connection - api.v2.core.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true]; - - // Metadata header key to use for sending the secret data - // if no header key is set, "authorization" header will be used - string header_key = 2; - - // Prefix to prepend to the secret in the metadata header - // if no prefix is set, the default is to use no prefix - string header_prefix = 3; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/BUILD 
b/generated_api_shadow/envoy/config/grpc_credential/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto deleted file mode 100644 index e2e9c7da48331..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; -option java_outer_classname = "AwsIamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.aws_iam] - -message AwsIamConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.grpc_credential.v2alpha.AwsIamConfig"; - - // The `service namespace - // `_ - // of the Grpc endpoint. - // - // Example: appmesh - string service_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `region `_ hosting the Grpc - // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment - // variable. 
- // - // Example: us-west-2 - string region = 2; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto deleted file mode 100644 index b364d2917099b..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; -option java_outer_classname = "FileBasedMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.file_based_metadata] - -message FileBasedMetadataConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig"; - - // Location or inline data of secret to use for authentication of the Google gRPC connection - // this secret will be attached to a header of the gRPC connection - core.v3.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true]; - - // Metadata header key to use for sending the secret data - // if no header key is set, "authorization" header will be used - string header_key = 2; - - // Prefix to prepend to the secret in the metadata header - // if no prefix is set, the default is to use no prefix - string header_prefix = 3; -} diff --git a/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD b/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- 
a/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto b/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto deleted file mode 100644 index 0c569f5c75e8c..0000000000000 --- a/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.config.health_checker.redis.v2; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2"; -option java_outer_classname = "RedisProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis] -// Redis health checker :ref:`configuration overview `. -// [#extension: envoy.health_checkers.redis] - -message Redis { - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v2/BUILD b/generated_api_shadow/envoy/config/listener/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/listener/v2/api_listener.proto b/generated_api_shadow/envoy/config/listener/v2/api_listener.proto deleted file mode 100644 index 6709d5fe0b524..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v2/api_listener.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v2; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v2"; -option java_outer_classname = "ApiListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: API listener] - -// Describes a type of API listener, which is used in non-proxy clients. The type of API -// exposed to the non-proxy application depends on the type of API listener. -message ApiListener { - // The type in this field determines the type of API listener. At present, the following - // types are supported: - // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) - // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the - // specific config message for each type of API listener. We could not do this in v2 because - // it would have caused circular dependencies for go protos: lds.proto depends on this file, - // and http_connection_manager.proto depends on rds.proto, which is in the same directory as - // lds.proto, so lds.proto cannot depend on this file.] 
- google.protobuf.Any api_listener = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/BUILD b/generated_api_shadow/envoy/config/listener/v3/BUILD deleted file mode 100644 index 3367a7bd5c595..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/listener/v2:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto b/generated_api_shadow/envoy/config/listener/v3/api_listener.proto deleted file mode 100644 index 77db7caaff5c0..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "ApiListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: API listener] - -// Describes a type of API listener, which is used in non-proxy clients. The type of API -// exposed to the non-proxy application depends on the type of API listener. 
-message ApiListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v2.ApiListener"; - - // The type in this field determines the type of API listener. At present, the following - // types are supported: - // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) - // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) - // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the - // specific config message for each type of API listener. We could not do this in v2 because - // it would have caused circular dependencies for go protos: lds.proto depends on this file, - // and http_connection_manager.proto depends on rds.proto, which is in the same directory as - // lds.proto, so lds.proto cannot depend on this file.] - google.protobuf.Any api_listener = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto deleted file mode 100644 index a5cd4bfe976f7..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ /dev/null @@ -1,318 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/socket_option.proto"; -import "envoy/config/listener/v3/api_listener.proto"; -import "envoy/config/listener/v3/listener_components.proto"; -import "envoy/config/listener/v3/udp_listener_config.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; 
- -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listener configuration] -// Listener :ref:`configuration overview ` - -// Listener list collections. Entries are *Listener* resources or references. -// [#not-implemented-hide:] -message ListenerCollection { - repeated xds.core.v3.CollectionEntry entries = 1; -} - -// [#next-free-field: 30] -message Listener { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; - - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Listener.DeprecatedV1"; - - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated. Use :ref:`Listener.bind_to_port - // ` - google.protobuf.BoolValue bind_to_port = 1; - } - - // Configuration for listener connection balancing. - message ConnectionBalanceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Listener.ConnectionBalanceConfig"; - - // A connection balancer implementation that does exact balancing. This means that a lock is - // held during balancing so that connection counts are nearly exactly balanced between worker - // threads. 
This is "nearly" exact in the sense that a connection might close in parallel thus - // making the counts incorrect, but this should be rectified on the next accept. This balancer - // sacrifices accept throughput for accuracy and should be used when there are a small number of - // connections that rarely cycle (e.g., service mesh gRPC egress). - message ExactBalance { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance"; - } - - oneof balance_type { - option (validate.required) = true; - - // If specified, the listener will use the exact connection balancer. - ExactBalance exact_balance = 1; - } - } - - // Configuration for envoy internal listener. All the future internal listener features should be added here. - // [#not-implemented-hide:] - message InternalListenerConfig { - } - - reserved 14, 23; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. - string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.v3.Address address = 2 [(validate.rules).message = {required: true}]; - - // Optional prefix to use on listener stats. If empty, the stats will be rooted at - // `listener.
.`. If non-empty, stats will be rooted at - // `listener..`. - string stat_prefix = 28; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - google.protobuf.BoolValue use_original_dst = 4; - - // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, - // the connection will be closed. The filter chain match is ignored in this field. - FilterChain default_filter_chain = 25; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Listener metadata. - core.v3.Metadata metadata = 6; - - // [#not-implemented-hide:] - DeprecatedV1 deprecated_v1 = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. These filters are run before any in - // :ref:`filter_chains `. 
Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:`UDP - // `. - // UDP listeners currently support a single filter. - repeated ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. - // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. 
- // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v3.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). - // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. - // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. 
- // This property is required on Windows for listeners using the original destination filter, - // see :ref:`Original Destination `. - core.v3.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:`UDP - // `, this field specifies UDP - // listener specific configuration. - UdpListenerConfig udp_listener_config = 18; - - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // - // .. note:: - // - // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - // not LDS. - // - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - ApiListener api_listener = 19; - - // The listener's connection balancer configuration, currently only applicable to TCP listeners. - // If no configuration is specified, Envoy will not attempt to balance active connections between - // worker threads. - // - // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 - // by setting :ref:`use_original_dst ` in X - // and :ref:`bind_to_port ` to false in Y1 and Y2, - // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and - // enable the balance config in Y1 and Y2 to balance the connections among the workers. 
- ConnectionBalanceConfig connection_balance_config = 20; - - // Deprecated. Use `enable_reuse_port` instead. - bool reuse_port = 21 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - // create one socket for each worker thread. This makes inbound connections - // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. This field - // defaults to true. - // - // .. attention:: - // - // Although this field defaults to true, it has different behavior on different platforms. See - // the following text for more information. - // - // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly - // with hot restart. - // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, - // the last socket wins and receives all connections/packets. For TCP, reuse_port is force - // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive - // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only - // a single worker will currently receive packets. - // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user - // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. - google.protobuf.BoolValue enable_reuse_port = 29; - - // Configuration for :ref:`access logs ` - // emitted by this listener. - repeated accesslog.v3.AccessLog access_log = 22; - - // The maximum length a tcp listener's pending connections queue can grow to. If no value is - // provided net.core.somaxconn will be used on Linux and 128 otherwise. - google.protobuf.UInt32Value tcp_backlog_size = 24; - - // Whether the listener should bind to the port. 
A listener that doesn't - // bind can only receive connections redirected from other listeners that set - // :ref:`use_original_dst ` - // to true. Default is true. - google.protobuf.BoolValue bind_to_port = 26; - - // The exclusive listener type and the corresponding config. - // TODO(lambdai): https://github.com/envoyproxy/envoy/issues/15372 - // Will create and add TcpListenerConfig. Will add UdpListenerConfig and ApiListener. - // [#not-implemented-hide:] - oneof listener_specifier { - // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the - // :ref:`envoy cluster ` to create a user space connection to. - // The internal listener acts as a tcp listener. It supports listener filters and network filter chains. - // The internal listener require :ref:`address ` has - // field `envoy_internal_address`. - // - // There are some limitations are derived from the implementation. The known limitations include - // - // * :ref:`ConnectionBalanceConfig ` is not - // allowed because both cluster connection and listener connection must be owned by the same dispatcher. 
- // * :ref:`tcp_backlog_size ` - // * :ref:`freebind ` - // * :ref:`transparent ` - // [#not-implemented-hide:] - InternalListenerConfig internal_listener = 27; - } -} diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto deleted file mode 100644 index 1e7e205bfded9..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ /dev/null @@ -1,361 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "ListenerComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 6] -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.Filter"; - - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- // [#extension-category: envoy.filters.network] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery - // service. In case of a failure and without the default configuration, the - // listener closes the connections. - // [#not-implemented-hide:] - core.v3.ExtensionConfigSource config_discovery = 5; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Directly connected source IP address (this will only be different from the source IP address -// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol -// listener filter `). -// 7. Source type (e.g. any, local or external network). -// 8. Source IP address. -// 9. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// A different way to reason about the filter chain matches: -// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. 
-// In each step, filter chains which most specifically matches the attributes continue to the next step. -// The listener guarantees at most 1 filter chain is left after all of the steps. -// -// Example: -// -// For destination port, filter chains specifying the destination port of incoming traffic are the -// most specific match. If none of the filter chains specifies the exact destination port, the filter -// chains which do not specify ports are the most specific match. Filter chains specifying the -// wrong port can never be the most specific match. -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. -// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 14] -message FilterChainMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.FilterChainMatch"; - - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - SAME_IP_OR_LOOPBACK = 1; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.v3.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
- // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // The criteria is satisfied if the directly connected source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the parameter is not - // specified or the list is empty, the directly connected source IP address is ignored. - repeated core.v3.CidrRange direct_source_prefix_ranges = 13; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.v3.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. - repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. 
- repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. - // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. - // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. - repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -// [#next-free-field: 10] -message FilterChain { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChain"; - - // The configuration for on-demand filter chain. If this field is not empty in FilterChain message, - // a filter chain will be built on-demand. 
- // On-demand filter chains help speedup the warming up of listeners since the building and initialization of - // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. - // Filter chains that are not often used can be set as on-demand. - message OnDemandConfiguration { - // The timeout to wait for filter chain placeholders to complete rebuilding. - // 1. If this field is set to 0, timeout is disabled. - // 2. If not specified, a default timeout of 15s is used. - // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. - // Upon failure or timeout, all connections related to this filter chain will be closed. - // Rebuilding will start again on the next new connection. - google.protobuf.Duration rebuild_timeout = 1; - } - - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. - repeated Filter filters = 3; - - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - // - // This field is deprecated. Add a - // :ref:`PROXY protocol listener filter ` - // explicitly instead. 
- google.protobuf.BoolValue use_proxy_proto = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // [#not-implemented-hide:] filter chain metadata. - core.v3.Metadata metadata = 5; - - // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - // [#extension-category: envoy.transport_sockets.downstream] - core.v3.TransportSocket transport_socket = 6; - - // If present and nonzero, the amount of time to allow incoming connections to complete any - // transport socket negotiations. If this expires before the transport reports connection - // establishment, the connection is summarily closed. - google.protobuf.Duration transport_socket_connect_timeout = 9; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; - - // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. - // If this field is not empty, the filter chain will be built on-demand. - // Otherwise, the filter chain will be built normally and block listener warming. - OnDemandConfiguration on_demand_configuration = 8; - - envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - hidden_envoy_deprecated_tls_context = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Listener filter chain match configuration. This is a recursive structure which allows complex -// nested match configurations to be built using various logical operators. 
-// -// Examples: -// -// * Matches if the destination port is 3306. -// -// .. code-block:: yaml -// -// destination_port_range: -// start: 3306 -// end: 3307 -// -// * Matches if the destination port is 3306 or 15000. -// -// .. code-block:: yaml -// -// or_match: -// rules: -// - destination_port_range: -// start: 3306 -// end: 3307 -// - destination_port_range: -// start: 15000 -// end: 15001 -// -// [#next-free-field: 6] -message ListenerFilterChainMatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ListenerFilterChainMatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ListenerFilterChainMatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated ListenerFilterChainMatchPredicate rules = 1 - [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - ListenerFilterChainMatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // Match destination port. Particularly, the match evaluation must use the recovered local port if - // the owning listener filter is after :ref:`an original_dst listener filter `. - type.v3.Int32Range destination_port_range = 5; - } -} - -message ListenerFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ListenerFilter"; - - // The name of the filter to instantiate. 
The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. - ListenerFilterChainMatchPredicate filter_disabled = 4; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto deleted file mode 100644 index 1432e1911b5d0..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/protocol.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "QuicConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC listener config] - -// Configuration specific to the UDP QUIC listener. 
-// [#next-free-field: 8] -message QuicProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.QuicProtocolOptions"; - - core.v3.QuicProtocolOptions quic_protocol_options = 1; - - // Maximum number of milliseconds that connection will be alive when there is - // no network activity. 300000ms if not specified. - google.protobuf.Duration idle_timeout = 2; - - // Connection timeout in milliseconds before the crypto handshake is finished. - // 20000ms if not specified. - google.protobuf.Duration crypto_handshake_timeout = 3; - - // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults - // to enabled. - core.v3.RuntimeFeatureFlag enabled = 4; - - // A multiplier to number of connections which is used to determine how many packets to read per - // event loop. A reasonable number should allow the listener to process enough payload but not - // starve TCP and other UDP sockets and also prevent long event loop duration. - // The default value is 32. This means if there are N QUIC connections, the total number of - // packets to read in each read event will be 32 * N. - // The actual number of packets to read in total by the UDP listener is also - // bound by 6000, regardless of this field or how many connections there are. - google.protobuf.UInt32Value packets_to_read_to_connection_count_ratio = 5 - [(validate.rules).uint32 = {gte: 1}]; - - // Configure which implementation of `quic::QuicCryptoClientStreamBase` to be used for this listener. - // If not specified the :ref:`QUICHE default one configured by ` will be used. - // [#extension-category: envoy.quic.server.crypto_stream] - core.v3.TypedExtensionConfig crypto_stream_config = 6; - - // Configure which implementation of `quic::ProofSource` to be used for this listener. - // If not specified the :ref:`default one configured by ` will be used. 
- // [#extension-category: envoy.quic.proof_source] - core.v3.TypedExtensionConfig proof_source_config = 7; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto deleted file mode 100644 index 276e98153aeb5..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/core/v3/udp_socket_config.proto"; -import "envoy/config/listener/v3/quic_config.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "UdpListenerConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UDP listener config] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 8] -message UdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.UdpListenerConfig"; - - reserved 1, 3, 4, 6; - - // UDP socket configuration for the listener. The default for - // :ref:`prefer_gro ` is false for - // listener sockets. If receiving a large amount of datagrams from a small number of sources, it - // may be worthwhile to enable this option after performance testing. - core.v3.UdpSocketConfig downstream_socket_config = 5; - - // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set - // to the default object to enable QUIC without modifying any additional options. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. 
- QuicProtocolOptions quic_options = 7; - - oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -message ActiveRawUdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ActiveRawUdpListenerConfig"; -} diff --git a/generated_api_shadow/envoy/config/metrics/v2/BUILD b/generated_api_shadow/envoy/config/metrics/v2/BUILD deleted file mode 100644 index aaab1df155473..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto deleted file mode 100644 index f1f8662f0750d..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v2"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metrics service] - -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink -// `. This opaque configuration will be used to create -// Metrics Service. 
-// [#extension: envoy.stat_sinks.metrics_service] -message MetricsServiceConfig { - // The upstream gRPC cluster that hosts the metrics service. - api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/metrics/v2/stats.proto b/generated_api_shadow/envoy/config/metrics/v2/stats.proto deleted file mode 100644 index 62afcf56e4e71..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v2/stats.proto +++ /dev/null @@ -1,339 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v2"; -option java_outer_classname = "StatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - -// Configuration for pluggable stats sinks. -message StatsSink { - // The name of the stats sink to instantiate. The name must match a supported - // stats sink. The built-in stats sinks are: - // - // * :ref:`envoy.stat_sinks.statsd ` - // * :ref:`envoy.stat_sinks.dog_statsd ` - // * :ref:`envoy.stat_sinks.metrics_service ` - // * :ref:`envoy.stat_sinks.hystrix ` - // - // Sinks optionally support tagged/multiple dimensional metrics. - string name = 1; - - // Stats sink specific configuration which depends on the sink being instantiated. See - // :ref:`StatsdSink ` for an example. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -// Statistics configuration such as tagging. 
-message StatsConfig { - // Each stat name is iteratively processed through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. - repeated TagSpecifier stats_tags = 1; - - // Use all default tag regexes specified in Envoy. These can be combined with - // custom tags specified in :ref:`stats_tags - // `. They will be processed before - // the custom tags. - // - // .. note:: - // - // If any default tags are specified twice, the config will be considered - // invalid. - // - // See :repo:`well_known_names.h ` for a list of the - // default tags in Envoy. - // - // If not provided, the value is assumed to be true. - google.protobuf.BoolValue use_all_default_tags = 2; - - // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated - // as normal. Preventing the instantiation of certain families of stats can improve memory - // performance for Envoys running especially large configs. - // - // .. warning:: - // Excluding stats may affect Envoy's behavior in undocumented ways. See - // `issue #8771 `_ for more information. - // If any unexpected behavior changes are observed, please open a new issue immediately. - StatsMatcher stats_matcher = 3; -} - -// Configuration for disabling stat instantiation. -message StatsMatcher { - // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to - // instantiate all stats, there is no need to construct a StatsMatcher. - // - // However, StatsMatcher can be used to limit the creation of families of stats in order to - // conserve memory. Stats can either be disabled entirely, or they can be - // limited by either an exclusion or an inclusion list of :ref:`StringMatcher - // ` protos: - // - // * If `reject_all` is set to `true`, no stats will be instantiated. 
If `reject_all` is set to - // `false`, all stats will be instantiated. - // - // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the - // list will not instantiate. - // - // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of - // the StringMatchers in the list. - // - // - // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. - // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based - // matcher rather than a regex-based matcher. - // - // Example 1. Excluding all stats. - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "rejectAll": "true" - // } - // } - // - // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "exclusionList": { - // "patterns": [ - // { - // "prefix": "cluster." - // } - // ] - // } - // } - // } - // - // Example 3. Including only manager-related stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "inclusionList": { - // "patterns": [ - // { - // "prefix": "cluster_manager." - // }, - // { - // "prefix": "listener_manager." - // } - // ] - // } - // } - // } - // - - oneof stats_matcher { - option (validate.required) = true; - - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all - // stats are enabled. - bool reject_all = 1; - - // Exclusive match. All stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.ListStringMatcher exclusion_list = 2; - - // Inclusive match. No stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.ListStringMatcher inclusion_list = 3; - } -} - -// Designates a tag name and value pair. 
The value may be either a fixed value -// or a regex providing the value via capture groups. The specified tag will be -// unconditionally set if a fixed value, otherwise it will only be set if one -// or more capture groups in the regex match. -message TagSpecifier { - // Attaches an identifier to the tag values to identify the tag being in the - // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in :repo:`well_known_names.h - // ` in the Envoy repository. If a :ref:`tag_name - // ` is provided in the config and - // neither :ref:`regex ` or - // :ref:`fixed_value ` were specified, - // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. - // - // .. note:: - // - // It is invalid to specify the same tag name twice in a config. - string tag_name = 1; - - oneof tag_value { - // Designates a tag to strip from the tag extracted name and provide as a named - // tag value for all statistics. This will only occur if any part of the name - // matches the regex provided with one or more capture groups. - // - // The first capture group identifies the portion of the name to remove. The - // second capture group (which will normally be nested inside the first) will - // designate the value of the tag for the statistic. If no second capture - // group is provided, the first will also be used to set the value of the tag. - // All other capture groups will be ignored. - // - // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and - // one tag specifier: - // - // .. 
code-block:: json - // - // { - // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\\.((.+?)\\.)" - // } - // - // Note that the regex will remove ``foo_cluster.`` making the tag extracted - // name ``cluster.upstream_rq_timeout`` and the tag value for - // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - // ``.`` character because of the second capture group). - // - // Example 2. a stat name - // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two - // tag specifiers: - // - // .. code-block:: json - // - // [ - // { - // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" - // }, - // { - // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\\.((.*?)\\.)" - // } - // ] - // - // The two regexes of the specifiers will be processed in the definition order. - // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. - // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. - string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; - - // Specifies a fixed tag value for the ``tag_name``. - string fixed_value = 3; - } -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support -// tagged metrics. -// [#extension: envoy.stat_sinks.statsd] -message StatsdSink { - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running `statsd `_ - // compliant listener. 
If specified, statistics will be flushed to this - // address. - api.v2.core.Address address = 1; - - // The name of a cluster that is running a TCP `statsd - // `_ compliant listener. If specified, - // Envoy will connect to this cluster to flush statistics. - string tcp_cluster_name = 2; - } - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. -// The sink emits stats with `DogStatsD `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. -// [#extension: envoy.stat_sinks.dog_statsd] -message DogStatsdSink { - reserved 2; - - oneof dog_statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - api.v2.core.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. -// The sink emits stats in `text/event-stream -// `_ -// formatted stream for use by `Hystrix dashboard -// `_. -// -// Note that only a single HystrixSink should be configured. -// -// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. 
-// [#extension: envoy.stat_sinks.hystrix] -message HystrixSink { - // The number of buckets the rolling statistical window is divided into. - // - // Each time the sink is flushed, all relevant Envoy statistics are sampled and - // added to the rolling window (removing the oldest samples in the window - // in the process). The sink then outputs the aggregate statistics across the - // current rolling window to the event stream(s). - // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets - // - // More detailed explanation can be found in `Hystrix wiki - // `_. - int64 num_buckets = 1; -} diff --git a/generated_api_shadow/envoy/config/metrics/v3/BUILD b/generated_api_shadow/envoy/config/metrics/v3/BUILD deleted file mode 100644 index 8e9c73c09e118..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto deleted file mode 100644 index df3c71e6a6308..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v3"; -option 
java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metrics service] - -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink -// `. This opaque configuration will be used to create -// Metrics Service. -// -// Example: -// -// .. code-block:: yaml -// -// stats_sinks: -// - name: envoy.stat_sinks.metrics_service -// typed_config: -// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig -// transport_api_version: V3 -// -// [#extension: envoy.stat_sinks.metrics_service] -message MetricsServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.MetricsServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - // API version for metric service transport protocol. This describes the metric service gRPC - // endpoint and version of messages used on the wire. - core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; - - // If true, counters are reported as the delta between flushing intervals. Otherwise, the current - // counter value is reported. Defaults to false. - // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the - // sink will take updates from the :ref:`MetricsResponse `. - google.protobuf.BoolValue report_counters_as_deltas = 2; - - // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, - // and the tag extracted name will be used instead of the full name, which may contain values used by the tag - // extractor or additional tags added during stats creation. 
- bool emit_tags_as_labels = 4; -} diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto deleted file mode 100644 index 1b5e833e2bede..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ /dev/null @@ -1,409 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v3"; -option java_outer_classname = "StatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - -// Configuration for pluggable stats sinks. -message StatsSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsSink"; - - // The name of the stats sink to instantiate. The name must match a supported - // stats sink. - // See the :ref:`extensions listed in typed_config below ` for the default list of available stats sink. - // Sinks optionally support tagged/multiple dimensional metrics. - string name = 1; - - // Stats sink specific configuration which depends on the sink being instantiated. See - // :ref:`StatsdSink ` for an example. - // [#extension-category: envoy.stats_sinks] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// Statistics configuration such as tagging. 
-message StatsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.StatsConfig"; - - // Each stat name is iteratively processed through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. - repeated TagSpecifier stats_tags = 1; - - // Use all default tag regexes specified in Envoy. These can be combined with - // custom tags specified in :ref:`stats_tags - // `. They will be processed before - // the custom tags. - // - // .. note:: - // - // If any default tags are specified twice, the config will be considered - // invalid. - // - // See :repo:`well_known_names.h ` for a list of the - // default tags in Envoy. - // - // If not provided, the value is assumed to be true. - google.protobuf.BoolValue use_all_default_tags = 2; - - // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated - // as normal. Preventing the instantiation of certain families of stats can improve memory - // performance for Envoys running especially large configs. - // - // .. warning:: - // Excluding stats may affect Envoy's behavior in undocumented ways. See - // `issue #8771 `_ for more information. - // If any unexpected behavior changes are observed, please open a new issue immediately. - StatsMatcher stats_matcher = 3; - - // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first - // match is applied. If no match is found (or if no rules are set), the following default buckets - // are used: - // - // .. 
code-block:: json - // - // [ - // 0.5, - // 1, - // 5, - // 10, - // 25, - // 50, - // 100, - // 250, - // 500, - // 1000, - // 2500, - // 5000, - // 10000, - // 30000, - // 60000, - // 300000, - // 600000, - // 1800000, - // 3600000 - // ] - repeated HistogramBucketSettings histogram_bucket_settings = 4; -} - -// Configuration for disabling stat instantiation. -message StatsMatcher { - // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to - // instantiate all stats, there is no need to construct a StatsMatcher. - // - // However, StatsMatcher can be used to limit the creation of families of stats in order to - // conserve memory. Stats can either be disabled entirely, or they can be - // limited by either an exclusion or an inclusion list of :ref:`StringMatcher - // ` protos: - // - // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to - // `false`, all stats will be instantiated. - // - // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the - // list will not instantiate. - // - // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of - // the StringMatchers in the list. - // - // - // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. - // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based - // matcher rather than a regex-based matcher. - // - // Example 1. Excluding all stats. - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "rejectAll": "true" - // } - // } - // - // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "exclusionList": { - // "patterns": [ - // { - // "prefix": "cluster." - // } - // ] - // } - // } - // } - // - // Example 3. Including only manager-related stats: - // - // .. 
code-block:: json - // - // { - // "statsMatcher": { - // "inclusionList": { - // "patterns": [ - // { - // "prefix": "cluster_manager." - // }, - // { - // "prefix": "listener_manager." - // } - // ] - // } - // } - // } - // - - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.StatsMatcher"; - - oneof stats_matcher { - option (validate.required) = true; - - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all - // stats are enabled. - bool reject_all = 1; - - // Exclusive match. All stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v3.ListStringMatcher exclusion_list = 2; - - // Inclusive match. No stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v3.ListStringMatcher inclusion_list = 3; - } -} - -// Designates a tag name and value pair. The value may be either a fixed value -// or a regex providing the value via capture groups. The specified tag will be -// unconditionally set if a fixed value, otherwise it will only be set if one -// or more capture groups in the regex match. -message TagSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.TagSpecifier"; - - // Attaches an identifier to the tag values to identify the tag being in the - // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in :repo:`well_known_names.h - // ` in the Envoy repository. If a :ref:`tag_name - // ` is provided in the config and - // neither :ref:`regex ` or - // :ref:`fixed_value ` were specified, - // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. - // - // .. note:: - // - // It is invalid to specify the same tag name twice in a config. 
- string tag_name = 1; - - oneof tag_value { - // Designates a tag to strip from the tag extracted name and provide as a named - // tag value for all statistics. This will only occur if any part of the name - // matches the regex provided with one or more capture groups. - // - // The first capture group identifies the portion of the name to remove. The - // second capture group (which will normally be nested inside the first) will - // designate the value of the tag for the statistic. If no second capture - // group is provided, the first will also be used to set the value of the tag. - // All other capture groups will be ignored. - // - // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and - // one tag specifier: - // - // .. code-block:: json - // - // { - // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\\.((.+?)\\.)" - // } - // - // Note that the regex will remove ``foo_cluster.`` making the tag extracted - // name ``cluster.upstream_rq_timeout`` and the tag value for - // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - // ``.`` character because of the second capture group). - // - // Example 2. a stat name - // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two - // tag specifiers: - // - // .. code-block:: json - // - // [ - // { - // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" - // }, - // { - // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\\.((.*?)\\.)" - // } - // ] - // - // The two regexes of the specifiers will be processed in the definition order. - // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. 
- // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. - string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; - - // Specifies a fixed tag value for the ``tag_name``. - string fixed_value = 3; - } -} - -// Specifies a matcher for stats and the buckets that matching stats should use. -message HistogramBucketSettings { - // The stats that this rule applies to. The match is applied to the original stat name - // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. - type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - - // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. - // The order of the buckets does not matter. - repeated double buckets = 2 [(validate.rules).repeated = { - min_items: 1 - unique: true - items {double {gt: 0.0}} - }]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support -// tagged metrics. -// [#extension: envoy.stat_sinks.statsd] -message StatsdSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink"; - - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running `statsd `_ - // compliant listener. If specified, statistics will be flushed to this - // address. - core.v3.Address address = 1; - - // The name of a cluster that is running a TCP `statsd - // `_ compliant listener. If specified, - // Envoy will connect to this cluster to flush statistics. - string tcp_cluster_name = 2; - } - - // Optional custom prefix for StatsdSink. 
If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. -// The sink emits stats with `DogStatsD `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. -// [#extension: envoy.stat_sinks.dog_statsd] -message DogStatsdSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.DogStatsdSink"; - - reserved 2; - - oneof dog_statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - core.v3.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; - - // Optional max datagram size to use when sending UDP messages. By default Envoy - // will emit one metric per datagram. By specifying a max-size larger than a single - // metric, Envoy will emit multiple, new-line separated metrics. The max datagram - // size should not exceed your network's MTU. - // - // Note that this value may not be respected if smaller than a single metric. - google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. -// The sink emits stats in `text/event-stream -// `_ -// formatted stream for use by `Hystrix dashboard -// `_. 
-// -// Note that only a single HystrixSink should be configured. -// -// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. -// [#extension: envoy.stat_sinks.hystrix] -message HystrixSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.HystrixSink"; - - // The number of buckets the rolling statistical window is divided into. - // - // Each time the sink is flushed, all relevant Envoy statistics are sampled and - // added to the rolling window (removing the oldest samples in the window - // in the process). The sink then outputs the aggregate statistics across the - // current rolling window to the event stream(s). - // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets - // - // More detailed explanation can be found in `Hystrix wiki - // `_. - int64 num_buckets = 1; -} diff --git a/generated_api_shadow/envoy/config/overload/v2alpha/BUILD b/generated_api_shadow/envoy/config/overload/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto b/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto deleted file mode 100644 index 03886cdee6d6e..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto3"; - -package envoy.config.overload.v2alpha; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.overload.v2alpha"; -option java_outer_classname = "OverloadProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Overload Manager] - -// The Overload Manager provides an extensible framework to protect Envoy instances -// from overload of various resources (memory, cpu, file descriptors, etc). -// It monitors a configurable set of resources and notifies registered listeners -// when triggers related to those resources fire. - -message ResourceMonitor { - // The name of the resource monitor to instantiate. Must match a registered - // resource monitor type. The built-in resource monitors are: - // - // * :ref:`envoy.resource_monitors.fixed_heap - // ` - // * :ref:`envoy.resource_monitors.injected_resource - // ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Configuration for the resource monitor being instantiated. 
- oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -message ThresholdTrigger { - // If the resource pressure is greater than or equal to this value, the trigger - // will fire. - double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; -} - -message Trigger { - // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof trigger_oneof { - option (validate.required) = true; - - ThresholdTrigger threshold = 2; - } -} - -message OverloadAction { - // The name of the overload action. This is just a well-known string that listeners can - // use for registering callbacks. Custom overload actions should be named using reverse - // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A set of triggers for this action. If any of these triggers fire the overload action - // is activated. Listeners are notified when the overload action transitions from - // inactivated to activated, or vice versa. - repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -message OverloadManager { - // The interval for refreshing resource usage. - google.protobuf.Duration refresh_interval = 1; - - // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The set of overload actions. - repeated OverloadAction actions = 3; -} diff --git a/generated_api_shadow/envoy/config/overload/v3/BUILD b/generated_api_shadow/envoy/config/overload/v3/BUILD deleted file mode 100644 index 9a222edfc8e6a..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/overload/v3/overload.proto b/generated_api_shadow/envoy/config/overload/v3/overload.proto deleted file mode 100644 index 5ff2222987f6f..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v3/overload.proto +++ /dev/null @@ -1,180 +0,0 @@ -syntax = "proto3"; - -package envoy.config.overload.v3; - -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.overload.v3"; -option java_outer_classname = "OverloadProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Overload Manager] - -// The Overload Manager provides an extensible framework to protect Envoy instances -// from overload of various resources (memory, cpu, file descriptors, etc). -// It monitors a configurable set of resources and notifies registered listeners -// when triggers related to those resources fire. - -message ResourceMonitor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.ResourceMonitor"; - - // The name of the resource monitor to instantiate. Must match a registered - // resource monitor type. - // See the :ref:`extensions listed in typed_config below ` for the default list of available resource monitor. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Configuration for the resource monitor being instantiated. - // [#extension-category: envoy.resource_monitors] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -message ThresholdTrigger { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.ThresholdTrigger"; - - // If the resource pressure is greater than or equal to this value, the trigger - // will enter saturation. - double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; -} - -message ScaledTrigger { - // If the resource pressure is greater than this value, the trigger will be in the - // :ref:`scaling ` state with value - // `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`. - double scaling_threshold = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; - - // If the resource pressure is greater than this value, the trigger will enter saturation. - double saturation_threshold = 2 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; -} - -message Trigger { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.Trigger"; - - // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof trigger_oneof { - option (validate.required) = true; - - ThresholdTrigger threshold = 2; - - ScaledTrigger scaled = 3; - } -} - -// Typed configuration for the "envoy.overload_actions.reduce_timeouts" action. See -// :ref:`the docs ` for an example of how to configure -// the action with different timeouts and minimum values. -message ScaleTimersOverloadActionConfig { - enum TimerType { - // Unsupported value; users must explicitly specify the timer they want scaled. 
- UNSPECIFIED = 0; - - // Adjusts the idle timer for downstream HTTP connections that takes effect when there are no active streams. - // This affects the value of :ref:`HttpConnectionManager.common_http_protocol_options.idle_timeout - // ` - HTTP_DOWNSTREAM_CONNECTION_IDLE = 1; - - // Adjusts the idle timer for HTTP streams initiated by downstream clients. - // This affects the value of :ref:`RouteAction.idle_timeout ` and - // :ref:`HttpConnectionManager.stream_idle_timeout - // ` - HTTP_DOWNSTREAM_STREAM_IDLE = 2; - - // Adjusts the timer for how long downstream clients have to finish transport-level negotiations - // before the connection is closed. - // This affects the value of - // :ref:`FilterChain.transport_socket_connect_timeout `. - TRANSPORT_SOCKET_CONNECT = 3; - } - - message ScaleTimer { - // The type of timer this minimum applies to. - TimerType timer = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; - - oneof overload_adjust { - option (validate.required) = true; - - // Sets the minimum duration as an absolute value. - google.protobuf.Duration min_timeout = 2; - - // Sets the minimum duration as a percentage of the maximum value. - type.v3.Percent min_scale = 3; - } - } - - // A set of timer scaling rules to be applied. - repeated ScaleTimer timer_scale_factors = 1 [(validate.rules).repeated = {min_items: 1}]; -} - -message OverloadAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.OverloadAction"; - - // The name of the overload action. This is just a well-known string that listeners can - // use for registering callbacks. Custom overload actions should be named using reverse - // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A set of triggers for this action. The state of the action is the maximum - // state of all triggers, which can be scaling between 0 and 1 or saturated. 
Listeners - // are notified when the overload action changes state. - repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; - - // Configuration for the action being instantiated. - google.protobuf.Any typed_config = 3; -} - -// Configuration for which accounts the WatermarkBuffer Factories should -// track. -message BufferFactoryConfig { - // The minimum power of two at which Envoy starts tracking an account. - // - // Envoy has 8 power of two buckets starting with the provided exponent below. - // Concretely the 1st bucket contains accounts for streams that use - // [2^minimum_account_to_track_power_of_two, - // 2^(minimum_account_to_track_power_of_two + 1)) bytes. - // With the 8th bucket tracking accounts - // >= 128 * 2^minimum_account_to_track_power_of_two. - // - // The maximum value is 56, since we're using uint64_t for bytes counting, - // and that's the last value that would use the 8 buckets. In practice, - // we don't expect the proxy to be holding 2^56 bytes. - // - // If omitted, Envoy should not do any tracking. - uint32 minimum_account_to_track_power_of_two = 1 [(validate.rules).uint32 = {lte: 56 gte: 10}]; -} - -message OverloadManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.OverloadManager"; - - // The interval for refreshing resource usage. - google.protobuf.Duration refresh_interval = 1; - - // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The set of overload actions. - repeated OverloadAction actions = 3; - - // Configuration for buffer factory. - BufferFactoryConfig buffer_factory_config = 4; -} diff --git a/generated_api_shadow/envoy/config/ratelimit/v2/BUILD b/generated_api_shadow/envoy/config/ratelimit/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto deleted file mode 100644 index 92801ea7b9689..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.ratelimit.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.ratelimit.v2"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate limit service] - -// Rate limit :ref:`configuration overview `. -message RateLimitServiceConfig { - reserved 1, 3; - - // Specifies the gRPC service that hosts the rate limit service. The client - // will connect to this cluster when it needs to make rate limit service - // requests. 
- api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto deleted file mode 100644 index 98889b1e28825..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.config.ratelimit.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.ratelimit.v3"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit service] - -// Rate limit :ref:`configuration overview `. -message RateLimitServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.ratelimit.v2.RateLimitServiceConfig"; - - reserved 1, 3; - - // Specifies the gRPC service that hosts the rate limit service. The client - // will connect to this cluster when it needs to make rate limit service - // requests. - core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and - // version of messages used on the wire. - core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/rbac/v2/BUILD b/generated_api_shadow/envoy/config/rbac/v2/BUILD deleted file mode 100644 index 4bce7466dddf7..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v2/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/rbac/v2/rbac.proto deleted file mode 100644 index 943ac33e08590..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v2/rbac.proto +++ /dev/null @@ -1,240 +0,0 @@ -syntax = "proto3"; - -package envoy.config.rbac.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/matcher/metadata.proto"; -import "envoy/type/matcher/path.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/api/expr/v1alpha1/syntax.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.rbac.v2"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Role Based Access Control (RBAC)] - -// Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). -// -// Here is an example of RBAC configuration. It has two policies: -// -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so -// does "cluster.local/ns/default/sa/superuser". -// -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the -// destination port is either 80 or 443. -// -// .. 
code-block:: yaml -// -// action: ALLOW -// policies: -// "service-admin": -// permissions: -// - any: true -// principals: -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/admin" -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/superuser" -// "product-viewer": -// permissions: -// - and_rules: -// rules: -// - header: { name: ":method", exact_match: "GET" } -// - url_path: -// path: { prefix: "/products" } -// - or_rules: -// rules: -// - destination_port: 80 -// - destination_port: 443 -// principals: -// - any: true -// -message RBAC { - // Should we do safe-list or block-list style access control? - enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style - // access control. This is the default type. - ALLOW = 0; - - // The policies deny access to principals. The rest is allowed. This is block-list style - // access control. - DENY = 1; - } - - // The action to take if a policy matches. The request is allowed if and only if: - // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match - Action action = 1; - - // Maps from policy name to policy. A match occurs when at least one policy matches the request. - map policies = 2; -} - -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. -message Policy { - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Required. 
The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. - repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; -} - -// Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 11] -message Permission { - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof rule { - option (validate.required) = true; - - // A set of rules that all must match in order to define the action. - Set and_rules = 1; - - // A set of rules where at least one must match in order to define the action. - Set or_rules = 2; - - // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - api.v2.route.HeaderMatcher header = 4; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.PathMatcher url_path = 10; - - // A CIDR block that describes the destination IP. - api.v2.core.CidrRange destination_ip = 5; - - // A port number that describes the destination port connecting to. 
- uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; - - // Metadata that describes additional information about the action. - type.matcher.MetadataMatcher metadata = 7; - - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. - Permission not_rule = 8; - - // The request server from the client's connection request. This is - // typically TLS SNI. - // - // .. attention:: - // - // The behavior of this field may be affected by how Envoy is configured - // as explained below. - // - // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, - // a TLS connection's requested SNI server name will be treated as if it - // wasn't present. - // - // * A :ref:`listener filter ` may - // overwrite a connection's requested server name within Envoy. - // - // Please refer to :ref:`this FAQ entry ` to learn to - // setup SNI. - type.matcher.StringMatcher requested_server_name = 9; - } -} - -// Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 12] -message Principal { - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Authentication attributes for a downstream. - message Authenticated { - reserved 1; - - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. 
- type.matcher.StringMatcher principal_name = 2; - } - - oneof identifier { - option (validate.required) = true; - - // A set of identifiers that all must match in order to define the downstream. - Set and_ids = 1; - - // A set of identifiers at least one must match in order to define the downstream. - Set or_ids = 2; - - // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // Authenticated attributes that identify the downstream. - Authenticated authenticated = 4; - - // A CIDR block that describes the downstream IP. - // This address will honor proxy protocol, but will not honor XFF. - api.v2.core.CidrRange source_ip = 5 [deprecated = true]; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. - api.v2.core.CidrRange direct_remote_ip = 10; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. - api.v2.core.CidrRange remote_ip = 11; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - api.v2.route.HeaderMatcher header = 6; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.PathMatcher url_path = 9; - - // Metadata that describes additional information about the principal. - type.matcher.MetadataMatcher metadata = 7; - - // Negates matching the provided principal. 
For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. - Principal not_id = 8; - } -} diff --git a/generated_api_shadow/envoy/config/rbac/v3/BUILD b/generated_api_shadow/envoy/config/rbac/v3/BUILD deleted file mode 100644 index c289def1f11d2..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v3/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto deleted file mode 100644 index d66f9be2b4981..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ /dev/null @@ -1,306 +0,0 @@ -syntax = "proto3"; - -package envoy.config.rbac.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/path.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/range.proto"; - -import "google/api/expr/v1alpha1/checked.proto"; -import "google/api/expr/v1alpha1/syntax.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.config.rbac.v3"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Role Based Access Control (RBAC)] - -// Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. Requests are allowed or denied based on the `action` and whether a matching policy is -// found. For instance, if the action is ALLOW and a matching policy is found the request should be -// allowed. -// -// RBAC can also be used to make access logging decisions by communicating with access loggers -// through dynamic metadata. When the action is LOG and at least one policy matches, the -// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating -// the request should be logged. -// -// Here is an example of RBAC configuration. It has two policies: -// -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so -// does "cluster.local/ns/default/sa/superuser". -// -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the -// destination port is either 80 or 443. -// -// .. 
code-block:: yaml -// -// action: ALLOW -// policies: -// "service-admin": -// permissions: -// - any: true -// principals: -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/admin" -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/superuser" -// "product-viewer": -// permissions: -// - and_rules: -// rules: -// - header: -// name: ":method" -// string_match: -// exact: "GET" -// - url_path: -// path: { prefix: "/products" } -// - or_rules: -// rules: -// - destination_port: 80 -// - destination_port: 443 -// principals: -// - any: true -// -message RBAC { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.RBAC"; - - // Should we do safe-list or block-list style access control? - enum Action { - // The policies grant access to principals. The rest are denied. This is safe-list style - // access control. This is the default type. - ALLOW = 0; - - // The policies deny access to principals. The rest are allowed. This is block-list style - // access control. - DENY = 1; - - // The policies set the `access_log_hint` dynamic metadata key based on if requests match. - // All requests are allowed. - LOG = 2; - } - - // The action to take if a policy matches. Every action either allows or denies a request, - // and can also carry out action-specific operations. - // - // Actions: - // - // * ALLOW: Allows the request if and only if there is a policy that matches - // the request. - // * DENY: Allows the request if and only if there are no policies that - // match the request. - // * LOG: Allows all requests. If at least one policy matches, the dynamic - // metadata key `access_log_hint` is set to the value `true` under the shared - // key namespace 'envoy.common'. If no policies match, it is set to `false`. - // Other actions do not modify this key. - // - Action action = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maps from policy name to policy. 
A match occurs when at least one policy matches the request. - // The policies are evaluated in lexicographic order of the policy name. - map policies = 2; -} - -// Policy specifies a role and the principals that are assigned/denied the role. -// A policy matches if and only if at least one of its permissions match the -// action taking place AND at least one of its principals match the downstream -// AND the condition is true if specified. -message Policy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; - - // Required. The set of permissions that define a role. Each permission is - // matched with OR semantics. To match all actions for this policy, a single - // Permission with the `any` field set to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Required. The set of principals that are assigned/denied the role based on - // “action”. Each principal is matched with OR semantics. To match all - // downstreams for this policy, a single Principal with the `any` field set to - // true should be used. - repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - // Only be used when checked_condition is not used. - google.api.expr.v1alpha1.Expr condition = 3 - [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; - - // [#not-implemented-hide:] - // An optional symbolic expression that has been successfully type checked. - // Only be used when condition is not used. - google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 - [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; -} - -// Permission defines an action (or actions) that a principal can take. 
-// [#next-free-field: 12] -message Permission { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; - - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v2.Permission.Set"; - - repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof rule { - option (validate.required) = true; - - // A set of rules that all must match in order to define the action. - Set and_rules = 1; - - // A set of rules where at least one must match in order to define the action. - Set or_rules = 2; - - // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - route.v3.HeaderMatcher header = 4; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 10; - - // A CIDR block that describes the destination IP. - core.v3.CidrRange destination_ip = 5; - - // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; - - // A port number range that describes a range of destination ports connecting to. - type.v3.Int32Range destination_port_range = 11; - - // Metadata that describes additional information about the action. - type.matcher.v3.MetadataMatcher metadata = 7; - - // Negates matching the provided permission. For instance, if the value of - // `not_rule` would match, this permission would not match. 
Conversely, if - // the value of `not_rule` would not match, this permission would match. - Permission not_rule = 8; - - // The request server from the client's connection request. This is - // typically TLS SNI. - // - // .. attention:: - // - // The behavior of this field may be affected by how Envoy is configured - // as explained below. - // - // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name - // `, - // a TLS connection's requested SNI server name will be treated as if it - // wasn't present. - // - // * A :ref:`listener filter ` may - // overwrite a connection's requested server name within Envoy. - // - // Please refer to :ref:`this FAQ entry ` to learn to - // setup SNI. - type.matcher.v3.StringMatcher requested_server_name = 9; - } -} - -// Principal defines an identity or a group of identities for a downstream -// subject. -// [#next-free-field: 12] -message Principal { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; - - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. - // Depending on the context, each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v2.Principal.Set"; - - repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Authentication attributes for a downstream. - message Authenticated { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v2.Principal.Authenticated"; - - reserved 1; - - // The name of the principal. If set, The URI SAN or DNS SAN in that order - // is used from the certificate, otherwise the subject field is used. If - // unset, it applies to any user that is authenticated. 
- type.matcher.v3.StringMatcher principal_name = 2; - } - - oneof identifier { - option (validate.required) = true; - - // A set of identifiers that all must match in order to define the - // downstream. - Set and_ids = 1; - - // A set of identifiers at least one must match in order to define the - // downstream. - Set or_ids = 2; - - // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // Authenticated attributes that identify the downstream. - Authenticated authenticated = 4; - - // A CIDR block that describes the downstream IP. - // This address will honor proxy protocol, but will not honor XFF. - core.v3.CidrRange source_ip = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is - // inferred from for example the x-forwarder-for header, proxy protocol, - // etc. - core.v3.CidrRange direct_remote_ip = 10; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip - // `. E.g, if the - // remote ip is inferred from for example the x-forwarder-for header, proxy - // protocol, etc. - core.v3.CidrRange remote_ip = 11; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP - // request. Only available for HTTP request. Note: the pseudo-header :path - // includes the query and fragment string. Use the `url_path` field if you - // want to match the URL path without the query and fragment string. - route.v3.HeaderMatcher header = 6; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 9; - - // Metadata that describes additional information about the principal. 
- type.matcher.v3.MetadataMatcher metadata = 7; - - // Negates matching the provided principal. For instance, if the value of - // `not_id` would match, this principal would not match. Conversely, if the - // value of `not_id` would not match, this principal would match. - Principal not_id = 8; - } -} diff --git a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto deleted file mode 100644 index 529622a071e77..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.resource_monitor.fixed_heap.v2alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha"; -option java_outer_classname = "FixedHeapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fixed heap] -// [#extension: envoy.resource_monitors.fixed_heap] - -// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a -// fraction of currently reserved heap memory divided by a statically configured maximum -// specified in the FixedHeapConfig. 
-message FixedHeapConfig { - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto deleted file mode 100644 index a9f056d2d29aa..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.config.resource_monitor.injected_resource.v2alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha"; -option java_outer_classname = "InjectedResourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Injected resource] -// [#extension: envoy.resource_monitors.injected_resource] - -// The injected resource monitor allows injecting a synthetic resource pressure into Envoy -// via a text file, which must contain a floating-point number in the range [0..1] representing -// the resource pressure and be updated atomically by a symbolic link swap. 
-// This is intended primarily for integration tests to force Envoy into an overloaded state. -message InjectedResourceConfig { - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto deleted file mode 100644 index c2b2e58a1823d..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.omit_canary_hosts.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2"; -option java_outer_classname = "OmitCanaryHostsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.host.omit_canary_hosts.v3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Omit Canary Hosts Predicate] -// [#extension: envoy.retry_host_predicates.omit_canary_hosts] - -message OmitCanaryHostsPredicate { -} diff --git a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD b/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD deleted file mode 100644 index 
83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto b/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto deleted file mode 100644 index d229cffef8ca9..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.omit_host_metadata.v2; - -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.omit_host_metadata.v2"; -option java_outer_classname = "OmitHostMetadataConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.host.omit_host_metadata.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Omit host metadata retry predicate] - -// A retry host predicate that can be used to reject a host based on -// predefined metadata match criteria. -// [#extension: envoy.retry_host_predicates.omit_host_metadata] -message OmitHostMetadataConfig { - // Retry host predicate metadata match criteria. The hosts in - // the upstream cluster with matching metadata will be omitted while - // attempting a retry of a failed request. The metadata should be specified - // under the *envoy.lb* key. 
- api.v2.core.Metadata metadata_match = 1; -} diff --git a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto deleted file mode 100644 index f69c5054f9c9b..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.previous_hosts.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.previous_hosts.v2"; -option java_outer_classname = "PreviousHostsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.host.previous_hosts.v3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous Hosts Predicate] -// [#extension: envoy.retry_host_predicates.previous_hosts] - -message PreviousHostsPredicate { -} diff --git a/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD b/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto b/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto deleted file mode 100644 index 3fc400c053a7f..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.previous_priorities; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities"; -option java_outer_classname = "PreviousPrioritiesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.priority.previous_priorities.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Previous priorities retry selector] - -// A retry host selector that attempts to spread retries between priorities, even if certain -// priorities would not normally be attempted due to higher priorities being available. -// -// As priorities get excluded, load will be distributed amongst the remaining healthy priorities -// based on the relative health of the priorities, matching how load is distributed during regular -// host selection. For example, given priority healths of {100, 50, 50}, the original load will be -// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). If P0 is excluded, the load -// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the -// remaining to spill over to P2. 
-// -// Each priority attempted will be excluded until there are no healthy priorities left, at which -// point the list of attempted priorities will be reset, essentially starting from the beginning. -// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the -// following sequence of priorities would be selected (assuming update_frequency = 1): -// Attempt 1: P0 (P0 is 100% healthy) -// Attempt 2: P2 (P0 already attempted, P2 only healthy priority) -// Attempt 3: P0 (no healthy priorities, reset) -// Attempt 4: P2 -// -// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original -// priority load, so behavior should be identical to not using this plugin. -// -// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of -// priorities), which might incur significant overhead for clusters with many priorities. -// [#extension: envoy.retry_priorities.previous_priorities] -message PreviousPrioritiesConfig { - // How often the priority load should be updated based on previously attempted priorities. Useful - // to allow each priorities to receive more than one request before being excluded or to reduce - // the number of times that the priority load has to be recomputed. - // - // For example, by setting this to 2, then the first two attempts (initial attempt and first - // retry) will use the unmodified priority load. The third and fourth attempt will use priority - // load which excludes the priorities routed to with the first two attempts, and the fifth and - // sixth attempt will use the priority load excluding the priorities used for the first four - // attempts. - // - // Must be greater than 0. 
- int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/config/route/v3/BUILD b/generated_api_shadow/envoy/config/route/v3/BUILD deleted file mode 100644 index 81cdfdf8a93a6..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/route/v3/route.proto b/generated_api_shadow/envoy/config/route/v3/route.proto deleted file mode 100644 index e2bf52165be92..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/route.proto +++ /dev/null @@ -1,142 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// [#next-free-field: 13] -message RouteConfiguration { - option 
(udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration"; - - // The name of the route configuration. For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. - repeated VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. - Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.v3.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. - repeated string request_headers_to_remove = 8 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. Defaults to false. - // - // [#next-major-version: In the v3 API, this will default to true.] - bool most_specific_header_mutations_wins = 10; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. 
If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). - google.protobuf.BoolValue validate_clusters = 7; - - // The maximum bytes of the response :ref:`direct response body - // ` size. If not specified the default - // is 4096. - // - // .. warning:: - // - // Envoy currently holds the content of :ref:`direct response body - // ` in memory. Be careful setting - // this to be larger than the default 4KB, since the allocated memory for direct response body - // is not subject to data plane buffering controls. - // - google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; - - // [#not-implemented-hide:] - // A list of plugins and their configurations which may be used by a - // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` - // within the route. All *extension.name* fields in this list must be unique. - repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; -} - -// Configuration for a cluster specifier plugin. -message ClusterSpecifierPlugin { - // The name of the plugin and its opaque configuration. - core.v3.TypedExtensionConfig extension = 1; -} - -message Vhds { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Vhds"; - - // Configuration source specifier for VHDS. 
- core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto deleted file mode 100644 index 8930f9ec8dff3..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ /dev/null @@ -1,2106 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/proxy_protocol.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/regex.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/metadata/v3/metadata.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v3"; -option java_outer_classname = "RouteComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP route components] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. 
Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 21] -message VirtualHost { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost"; - - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. - // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - // - // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. 
- repeated string domains = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} - }]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. - repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. - repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. 
Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - // - // [#next-major-version: rename to include_attempt_count_in_request.] 
- bool include_request_attempt_count = 14; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the downstream response. Setting this option will cause the router to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the downstream - // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_attempt_count_in_response = 19; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that setting a route level entry - // will take precedence over this config and it'll be treated independently (e.g.: values are not - // inherited). :ref:`Retry policy ` should not be - // set if this field is used. - google.protobuf.Any retry_policy_typed_config = 20; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum - // value of this and the listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; - - map hidden_envoy_deprecated_per_filter_config = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// A filter-defined action type. 
-message FilterAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.FilterAction"; - - google.protobuf.Any action = 1; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. -// [#next-free-field: 19] -message Route { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Route"; - - reserved 6; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - - // [#not-implemented-hide:] - // A filter-defined action (e.g., it could dynamically generate the RouteAction). - // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when - // implemented] - FilterAction filter_action = 17; - - // [#not-implemented-hide:] - // An action used when the route will generate a response directly, - // without forwarding to an upstream host. This will be used in non-proxy - // xDS clients like the gRPC server. It could also be used in the future - // in Envoy for a filter that directly generates responses for requests. - NonForwardingAction non_forwarding_action = 18; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. 
- // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. - core.v3.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. - repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. 
- repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; - - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; - - // [#next-free-field: 13] - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.WeightedCluster.ClusterWeight"; - - reserved 7; - - // Only one of *name* and *cluster_header* may be specified. - // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. 
- string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"]; - - // Only one of *name* and *cluster_header* may be specified. - // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string cluster_header = 12 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier" - ]; - - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in - // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. - core.v3.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. 
- // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. 
The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 10; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 11 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. 
- string runtime_key_prefix = 2; -} - -// [#next-free-field: 14] -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; - - message GrpcRouteMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions"; - } - - message TlsContextMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteMatch.TlsContextMatchOptions"; - - // If specified, the route will match against whether or not a certificate is presented. - // If not specified, certificate presentation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue presented = 1; - - // If specified, the route will match against whether or not a certificate is validated. - // If not specified, certificate validation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue validated = 2; - } - - // An extensible message for matching CONNECT requests. - message ConnectMatcher { - } - - reserved 5; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. 
- // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - - // If this is used as the matcher, the matcher will only match CONNECT requests. - // Note that this will not match HTTP/2 upgrade-style CONNECT requests - // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style - // upgrades. - // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where Extended CONNECT requests may have a path, the path matchers will work if - // there is a path present. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectMatcher connect_matcher = 12; - - string hidden_envoy_deprecated_regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // Indicates that prefix/path matching should be case sensitive. The default - // is true. Ignored for safe_regex matching. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. 
For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.v3.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). - repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. 
- GrpcRouteMatchOptions grpc = 8; - - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; - - // Specifies a set of dynamic metadata matchers on which the route should match. - // The router will check the dynamic metadata against all the specified dynamic metadata matchers. - // If the number of specified dynamic metadata matchers is nonzero, they all must match the - // dynamic metadata for a match to occur. - repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13; -} - -// [#next-free-field: 12] -message CorsPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy"; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. - string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. - google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. 
- core.v3.RuntimeFractionalPercent filter_enabled = 9; - - google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - // enforced. - // - // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - // fields have to explicitly disable the filter in order for this setting to take effect. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. - core.v3.RuntimeFractionalPercent shadow_enabled = 10; - - repeated string hidden_envoy_deprecated_allow_origin = 1 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - repeated string hidden_envoy_deprecated_allow_origin_regex = 8 [ - deprecated = true, - (validate.rules).repeated = {items {string {max_bytes: 1024}}}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; -} - -// [#next-free-field: 38] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; - - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // Configures :ref:`internal redirect ` behavior. - // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] 
- enum InternalRedirectAction { - option deprecated = true; - - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.RequestMirrorPolicy"; - - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - core.v3.RuntimeFractionalPercent runtime_fraction = 3; - - // Determines if the trace span should be sampled. Defaults to true. 
- google.protobuf.BoolValue trace_sampled = 4; - - string hidden_envoy_deprecated_runtime_key = 2 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - // [#next-free-field: 7] - message HashPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy"; - - message Header { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.Header"; - - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If specified, the request header value will be rewritten and used - // to produce the hash key. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. 
- message Cookie { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.Cookie"; - - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties"; - - // Hash on source IP address. - bool source_ip = 1; - } - - message QueryParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter"; - - // The name of the URL query parameter that will be used to obtain the hash - // key. If the parameter is not present, no hash will be produced. Query - // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_len: 1}]; - } - - message FilterState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.FilterState"; - - // The name of the Object in the per-request filterState, which is an - // Envoy::Http::Hashable object. If there is no data associated with the key, - // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. 
- Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - - // Query parameter hash policy. - QueryParameter query_parameter = 5; - - // Filter state hash policy. - FilterState filter_state = 6; - } - - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:`upgrade_configs - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.UpgradeConfig"; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT or POST requests, when forwarding request payload as raw TCP. - message ConnectConfig { - // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. - core.v3.ProxyProtocolConfig proxy_protocol_config = 1; - - // If set, the route will also allow forwarding POST payload as raw TCP. - bool allow_post = 2; - } - - // The case-insensitive name of this upgrade, e.g. "websocket". 
- // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT requests, when forwarding CONNECT payload as raw TCP. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectConfig connect_config = 3; - } - - message MaxStreamDuration { - // Specifies the maximum duration allowed for streams on the route. If not specified, the value - // from the :ref:`max_stream_duration - // ` field in - // :ref:`HttpConnectionManager.common_http_protocol_options - // ` - // is used. If this field is set explicitly to zero, any - // HttpConnectionManager max_stream_duration timeout will be disabled for - // this route. - google.protobuf.Duration max_stream_duration = 1; - - // If present, and the request contains a `grpc-timeout header - // `_, use that value as the - // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. - // If set to 0, the `grpc-timeout` header is used without modification. - google.protobuf.Duration grpc_timeout_header_max = 2; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by - // subtracting the provided duration from the header. This is useful for allowing Envoy to set - // its global timeout to be less than that of the deadline imposed by the calling client, which - // makes it more likely that Envoy will handle the timeout instead of having the call canceled - // by the client. 
If, after applying the offset, the resulting timeout is zero or negative, - // the stream will timeout immediately. - google.protobuf.Duration grpc_timeout_header_offset = 3; - } - - reserved 12, 18, 19, 16, 22, 21; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string cluster_header = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - - // [#not-implemented-hide:] - // Name of the cluster specifier plugin to use to determine the cluster for - // requests on this route. The plugin name must be defined in the associated - // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` - // in the - // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. - string cluster_specifier_plugin = 37; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. 
- ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.v3.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of *prefix_rewrite* or - // :ref:`regex_rewrite ` - // may be specified. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. 
- string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite ` - // or *regex_rewrite* may be specified. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. 
This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the result of the regex substitution executed on path value with query and fragment removed. - // This is useful for transitioning variable content between path segment and subdomain. - // - // For example with the following config: - // - // .. code-block:: yaml - // - // host_rewrite_path_regex: - // pattern: - // google_re2: {} - // regex: "^/(.+)/.+$" - // substitution: \1 - // - // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`. - type.matcher.v3.RegexMatchAndSubstitute host_rewrite_path_regex = 35; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. 
See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - RetryPolicy retry_policy = 9; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that if this is set, it'll take - // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - // most internal one becomes the enforced policy). 
:ref:`Retry policy ` - // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; - - // Optionally specifies the :ref:`routing priority `. - core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - // - // This field is deprecated. Please use :ref:`vh_rate_limits ` - google.protobuf.BoolValue include_vh_rate_limits = 14 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. 
- CorsPolicy cors = 17; - - // Deprecated by :ref:`grpc_timeout_header_max ` - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - // - // .. note:: - // - // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - // precedence over `grpc-timeout header `_, when - // both are present. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Deprecated by :ref:`grpc_timeout_header_offset `. - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). 
- google.protobuf.Duration grpc_timeout_offset = 28 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - repeated UpgradeConfig upgrade_configs = 25; - - // If present, Envoy will try to follow an upstream redirect response instead of proxying the - // response back to the downstream. An upstream redirect response is defined - // by :ref:`redirect_response_codes - // `. - InternalRedirectPolicy internal_redirect_policy = 34; - - InternalRedirectAction internal_redirect_action = 26 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; - - // Specifies the maximum stream duration for this route. - MaxStreamDuration max_stream_duration = 36; - - RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// HTTP retry :ref:`architecture overview `. 
-// [#next-free-field: 12] -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy"; - - enum ResetHeaderFormat { - SECONDS = 0; - UNIX_TIMESTAMP = 1; - } - - message RetryPriority { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RetryPolicy.RetryPriority"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_priorities] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - message RetryHostPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RetryPolicy.RetryHostPredicate"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_host_predicates] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - message RetryBackOff { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RetryPolicy.RetryBackOff"; - - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. 
See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - message ResetHeader { - // The name of the reset header. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The format of the reset header. - ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // A retry back-off strategy that applies when the upstream server rate limits - // the request. - // - // Given this configuration: - // - // .. code-block:: yaml - // - // rate_limited_retry_back_off: - // reset_headers: - // - name: Retry-After - // format: SECONDS - // - name: X-RateLimit-Reset - // format: UNIX_TIMESTAMP - // max_interval: "300s" - // - // The following algorithm will apply: - // - // 1. If the response contains the header ``Retry-After`` its value must be on - // the form ``120`` (an integer that represents the number of seconds to - // wait before retrying). If so, this value is used as the back-off interval. - // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its - // value must be on the form ``1595320702`` (an integer that represents the - // point in time at which to retry, as a Unix timestamp in seconds). If so, - // the current time is subtracted from this value and the result is used as - // the back-off interval. - // 3. Otherwise, Envoy will use the default - // :ref:`exponential back-off ` - // strategy. - // - // No matter which format is used, if the resulting back-off interval exceeds - // ``max_interval`` it is discarded and the next header in ``reset_headers`` - // is tried. If a request timeout is configured for the route it will further - // limit how long the request will be allowed to run. 
- // - // To prevent many clients retrying at the same point in time jitter is added - // to the back-off interval, so the resulting interval is decided by taking: - // ``random(interval, interval * 1.5)``. - // - // .. attention:: - // - // Configuring ``rate_limited_retry_back_off`` will not by itself cause a request - // to be retried. You will still need to configure the right retry policy to match - // the responses from the upstream server. - message RateLimitedRetryBackOff { - // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) - // to match against the response. Headers are tried in order, and matched case - // insensitive. The first header to be parsed successfully is used. If no headers - // match the default exponential back-off is used instead. - repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the maximum back off interval that Envoy will allow. If a reset - // header contains an interval longer than this then it will be discarded and - // the next header will be tried. Defaults to 300 seconds. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2 - [(udpa.annotations.field_migrate).rename = "max_retries"]; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - // - // .. 
note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. - RetryBackOff retry_back_off = 8; - - // Specifies parameters that control a retry back-off strategy that is used - // when the request is rate limited by the upstream server. 
The server may - // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to - // provide feedback to the client on how long to wait before retrying. If - // configured, this back-off strategy will be used instead of the - // default exponential back off strategy (configured using `retry_back_off`) - // whenever a response includes the matching headers. - RateLimitedRetryBackOff rate_limited_retry_back_off = 11; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. - // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HedgePolicy"; - - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. - // [#not-implemented-hide:] - type.v3.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout is hit. - // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. - // The first request to complete successfully will be the one returned to the caller. - // - // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. 
- // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client - // if there are no more retries left. - // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. - // - // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least - // one error code and specifies a maximum number of retries. - // - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -// [#next-free-field: 10] -message RedirectAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RedirectAction"; - - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". - bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The port value of the URL will be swapped with this value. 
- uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - // Please note that query string in path_redirect will override the - // request's query string and will not be stripped. - // - // For example, let's say we have the following routes: - // - // - match: { path: "/old-path-1" } - // redirect: { path_redirect: "/new-path-1" } - // - match: { path: "/old-path-2" } - // redirect: { path_redirect: "/new-path-2", strip-query: "true" } - // - match: { path: "/old-path-3" } - // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } - // - // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" - // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" - // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirect, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. 
- // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 9; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.DirectResponseAction"; - - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or - // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. 
- core.v3.DataSource body = 2; -} - -// [#not-implemented-hide:] -message NonForwardingAction { -} - -message Decorator { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Decorator"; - - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether the decorated details should be propagated to the other party. The default is true. - google.protobuf.BoolValue propagate = 2; -} - -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Tracing"; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. 
For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.FractionalPercent overall_sampling = 3; - - // A list of custom tags with unique tag name to create tags for the active span. - // It will take effect after merging with the :ref:`corresponding configuration - // ` - // configured in the HTTP connection manager. If two tags with the same name are configured - // each in the HTTP connection manager and the route level, the one configured here takes - // priority. - repeated type.tracing.v3.CustomTag custom_tags = 4; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. -// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualCluster"; - - // Specifies a list of header matchers to use for matching requests. 
Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. - repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_len: 1}]; - - string hidden_envoy_deprecated_pattern = 1 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - core.v3.RequestMethod hidden_envoy_deprecated_method = 3 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Global rate limiting :ref:`architecture overview `. -// Also applies to Local rate limiting :ref:`using descriptors `. -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - - // [#next-free-field: 10] - message Action { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action"; - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.SourceCluster"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. 
code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.DestinationCluster"; - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.RequestHeaders"; - - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; - - // If set to true, Envoy skips the descriptor while calling rate limiting service - // when header is not present in the request. By default it skips calling the - // rate limiting service if this header is not present in the request. - bool skip_if_absent = 3; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. 
code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.RemoteAddress"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.GenericKey"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional key to use in the descriptor entry. If not set it defaults - // to 'generic_key' as the descriptor key. - string descriptor_key = 2; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). 
- repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - // The following descriptor entry is appended when the - // :ref:`dynamic metadata ` contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - // - // .. attention:: - // This action has been deprecated in favor of the :ref:`metadata ` action - message DynamicMetaData { - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the dynamic metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - } - - // The following descriptor entry is appended when the metadata contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - message MetaData { - enum Source { - // Query :ref:`dynamic metadata ` - DYNAMIC = 0; - - // Query :ref:`route entry metadata ` - ROUTE_ENTRY = 1; - } - - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - - // Source of metadata - Source source = 4 [(validate.rules).enum = {defined_only: true}]; - } - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. 
- SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. - RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - - // Rate limit on dynamic metadata. - // - // .. attention:: - // This field has been deprecated in favor of the :ref:`metadata ` field - DynamicMetaData dynamic_metadata = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - // Rate limit on metadata. - MetaData metadata = 8; - - // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. - // [#extension-category: envoy.rate_limit_descriptors] - core.v3.TypedExtensionConfig extension = 9; - } - } - - message Override { - // Fetches the override from the dynamic metadata. - message DynamicMetadata { - // Metadata struct that defines the key and path to retrieve the struct value. - // The value must be a struct containing an integer "requests_per_unit" property - // and a "unit" property with a value parseable to :ref:`RateLimitUnit - // enum ` - type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; - } - - oneof override_specifier { - option (validate.required) = true; - - // Limit override from dynamic metadata. - DynamicMetadata dynamic_metadata = 1; - } - } - - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. 
- google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; - - // An optional limit override to be appended to the descriptor produced by this - // rate limit configuration. If the override value is invalid or cannot be resolved - // from metadata, no override is provided. See :ref:`rate limit override - // ` for more information. - Override limit = 4; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] -// [#next-free-field: 14] -message HeaderMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HeaderMatcher"; - - reserved 2, 3; - - // Specifies the name of the header in the request. 
- string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. - // This field is deprecated. Please use :ref:`string_match `. - string exact_match = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. - // This field is deprecated. Please use :ref:`string_match `. - type.matcher.v3.RegexMatcher safe_regex_match = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.v3.Int64Range range_match = 6; - - // If specified as true, header match will be performed based on whether the header is in the - // request. If specified as false, header match will be performed based on whether the header is absent. - bool present_match = 7; - - // If specified, header match will be performed based on the prefix of the header value. 
- // Note: empty prefix is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on the suffix of the header value. - // Note: empty suffix is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on whether the header value contains - // the given value or not. - // Note: empty contains match is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string contains_match = 12 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on the string match of the header value. - type.matcher.v3.StringMatcher string_match = 13; - - string hidden_envoy_deprecated_regex_match = 5 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // If specified, the match result will be inverted before checking. Defaults to false. 
- // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. - bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -// [#next-free-field: 7] -message QueryParameterMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.QueryParameterMatcher"; - - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } - - string hidden_envoy_deprecated_value = 3 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// HTTP Internal Redirect :ref:`architecture overview `. -message InternalRedirectPolicy { - // An internal redirect is not handled, unless the number of previous internal redirects that a - // downstream request has encountered is lower than this value. - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. 
- google.protobuf.UInt32Value max_internal_redirects = 1; - - // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, - // only 302 will be treated as internal redirect. - // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. - repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; - - // Specifies a list of predicates that are queried when an upstream response is deemed - // to trigger an internal redirect by all other criteria. Any predicate in the list can reject - // the redirect, causing the response to be proxied to downstream. - // [#extension-category: envoy.internal_redirect_predicates] - repeated core.v3.TypedExtensionConfig predicates = 3; - - // Allow internal redirect to follow a target URI with a different scheme than the value of - // x-forwarded-proto. The default is false. - bool allow_cross_scheme_redirect = 4; -} - -// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the -// map value in -// :ref:`VirtualHost.typed_per_filter_config`, -// :ref:`Route.typed_per_filter_config`, -// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` -// to add additional flags to the filter. -// [#not-implemented-hide:] -message FilterConfig { - // The filter config. - google.protobuf.Any config = 1; - - // If true, the filter is optional, meaning that if the client does - // not support the specified filter, it may ignore the map entry rather - // than rejecting the config. 
- bool is_optional = 2; -} diff --git a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto b/generated_api_shadow/envoy/config/route/v3/scoped_route.proto deleted file mode 100644 index eb47d7e10898d..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v3"; -option java_outer_classname = "ScopedRouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP scoped routing configuration] -// * Routing :ref:`architecture overview ` - -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name). -// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... -// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. 
code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -message ScopedRouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ScopedRouteConfiguration"; - - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ScopedRouteConfiguration.Key"; - - message Fragment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ScopedRouteConfiguration.Key.Fragment"; - - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Whether the RouteConfiguration should be loaded on demand. - bool on_demand = 4; - - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated - // with this scope. 
- string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/tap/v3/BUILD b/generated_api_shadow/envoy/config/tap/v3/BUILD deleted file mode 100644 index 416ccc0f9403c..0000000000000 --- a/generated_api_shadow/envoy/config/tap/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/service/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto deleted file mode 100644 index c25a2af5a3b51..0000000000000 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ /dev/null @@ -1,280 +0,0 @@ -syntax = "proto3"; - -package envoy.config.tap.v3; - -import "envoy/config/common/matcher/v3/matcher.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.tap.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common tap configuration] - -// Tap configuration. 
-message TapConfig { - // [#comment:TODO(mattklein123): Rate limiting] - - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.TapConfig"; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. - MatchPredicate match_config = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. - common.matcher.v3.MatchPredicate match = 4; - - // The tap output configuration. If a match configuration matches a data source being tapped, - // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for - // which the tap matching is enabled. When not enabled, the request\connection will not be - // recorded. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - core.v3.RuntimeFractionalPercent tap_enabled = 3; -} - -// Tap match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.MatchPredicate"; - - // A set of match configurations used for logical operations. 
- message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.MatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.HttpHeadersMatch"; - - // HTTP headers to match. - repeated route.v3.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. 
-// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. -message HttpGenericBodyMatch { - message GenericTextMatch { - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Tap output configuration. -message OutputConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.OutputConfig"; - - // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple - // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; - - // For buffered tapping, the maximum amount of received body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_rx_bytes = 2; - - // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. 
- google.protobuf.UInt32Value max_buffered_tx_bytes = 3; - - // Indicates whether taps produce a single buffered message per tap, or multiple streamed - // messages per tap in the emitted :ref:`TraceWrapper - // ` messages. Note that streamed tapping does not - // mean that no buffering takes place. Buffering may be required if data is processed before a - // match can be determined. See the HTTP tap filter :ref:`streaming - // ` documentation for more information. - bool streaming = 4; -} - -// Tap output sink configuration. -message OutputSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.OutputSink"; - - // Output format. All output is in the form of one or more :ref:`TraceWrapper - // ` messages. This enumeration indicates - // how those messages are written. Note that not all sinks support all output formats. See - // individual sink documentation for more information. - enum Format { - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_bytes - // ` field. This means that body data will be - // base64 encoded as per the `proto3 JSON mappings - // `_. - JSON_BODY_AS_BYTES = 0; - - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_string - // ` field. This means that body data will be - // string encoded as per the `proto3 JSON mappings - // `_. This format type is - // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the - // user wishes to view it directly without being forced to base64 decode the body. - JSON_BODY_AS_STRING = 1; - - // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes - // multiple binary messages without any length information the data stream will not be - // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) - // this output format makes consumption simpler. 
- PROTO_BINARY = 2; - - // Messages are written as a sequence tuples, where each tuple is the message length encoded - // as a `protobuf 32-bit varint - // `_ - // followed by the binary message. The messages can be read back using the language specific - // protobuf coded stream implementation to obtain the message length and the message. - PROTO_BINARY_LENGTH_DELIMITED = 3; - - // Text proto format. - PROTO_TEXT = 4; - } - - // Sink output format. - Format format = 1 [(validate.rules).enum = {defined_only: true}]; - - oneof output_sink_type { - option (validate.required) = true; - - // Tap output will be streamed out the :http:post:`/tap` admin endpoint. - // - // .. attention:: - // - // It is only allowed to specify the streaming admin output sink if the tap is being - // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has - // been configured to receive tap configuration from some other source (e.g., static - // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 2; - - // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 3; - - // [#not-implemented-hide:] - // GrpcService to stream data to. The format argument must be PROTO_BINARY. - // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] - StreamingGrpcSink streaming_grpc = 4; - } -} - -// Streaming admin sink configuration. -message StreamingAdminSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamingAdminSink"; -} - -// The file per tap sink outputs a discrete file for every tapped stream. -message FilePerTapSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.FilePerTapSink"; - - // Path prefix. 
The output file will be of the form _.pb, where is an - // identifier distinguishing the recorded trace for stream instances (the Envoy - // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; -} - -// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC -// server. -message StreamingGrpcSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamingGrpcSink"; - - // Opaque identifier, that will be sent back to the streaming grpc server. - string tap_id = 1; - - // The gRPC server that hosts the Tap Sink Service. - core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/BUILD b/generated_api_shadow/envoy/config/trace/v2/BUILD deleted file mode 100644 index e6505e4f15d01..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v2/datadog.proto b/generated_api_shadow/envoy/config/trace/v2/datadog.proto deleted file mode 100644 index 0992601a8acc4..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/datadog.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "DatadogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Datadog tracer] - -// Configuration for the Datadog tracer. -// [#extension: envoy.tracers.datadog] -message DatadogConfig { - // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The name used for the service when traces are generated by envoy. 
- string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto b/generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto deleted file mode 100644 index 55c6d401b335f..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "DynamicOtProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamically loadable OpenTracing tracer] - -// DynamicOtConfig is used to dynamically load a tracer from a shared library -// that implements the `OpenTracing dynamic loading API -// `_. -// [#extension: envoy.tracers.dynamic_ot] -message DynamicOtConfig { - // Dynamic library implementing the `OpenTracing API - // `_. - string library = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The configuration to use when creating a tracer from the given dynamic - // library. 
- google.protobuf.Struct config = 2; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v2/http_tracer.proto deleted file mode 100644 index fba830b987b6d..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/http_tracer.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "HttpTracerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - -// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. -// -// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one -// supported. -// -// .. attention:: -// -// Use of this message type has been deprecated in favor of direct use of -// :ref:`Tracing.Http `. -message Tracing { - // Configuration for an HTTP tracer provider used by Envoy. - // - // The configuration is defined by the - // :ref:`HttpConnectionManager.Tracing ` - // :ref:`provider ` - // field. - message Http { - // The name of the HTTP trace driver to instantiate. The name must match a - // supported HTTP trace driver. Built-in trace drivers: - // - // - *envoy.tracers.lightstep* - // - *envoy.tracers.zipkin* - // - *envoy.tracers.dynamic_ot* - // - *envoy.tracers.datadog* - // - *envoy.tracers.opencensus* - // - *envoy.tracers.xray* - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Trace driver specific configuration which depends on the driver being instantiated. 
- // See the trace drivers for examples: - // - // - :ref:`LightstepConfig ` - // - :ref:`ZipkinConfig ` - // - :ref:`DynamicOtConfig ` - // - :ref:`DatadogConfig ` - // - :ref:`OpenCensusConfig ` - // - :ref:`AWS X-Ray ` - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - // Provides configuration for the HTTP tracer. - Http http = 1; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/lightstep.proto b/generated_api_shadow/envoy/config/trace/v2/lightstep.proto deleted file mode 100644 index 849749baaa0d9..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/lightstep.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "LightstepProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: LightStep tracer] - -// Configuration for the LightStep tracer. -// [#extension: envoy.tracers.lightstep] -message LightstepConfig { - // Available propagation modes - enum PropagationMode { - // Propagate trace context in the single header x-ot-span-context. - ENVOY = 0; - - // Propagate trace context using LightStep's native format. - LIGHTSTEP = 1; - - // Propagate trace context using the b3 format. - B3 = 2; - - // Propagation trace context using the w3 trace-context standard. - TRACE_CONTEXT = 3; - } - - // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // File containing the access token to the `LightStep - // `_ API. - string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Propagation modes to use by LightStep's tracer. 
- repeated PropagationMode propagation_modes = 3 - [(validate.rules).repeated = {items {enum {defined_only: true}}}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/opencensus.proto b/generated_api_shadow/envoy/config/trace/v2/opencensus.proto deleted file mode 100644 index 1a9a879b21e43..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/opencensus.proto +++ /dev/null @@ -1,92 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "opencensus/proto/trace/v1/trace_config.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "OpencensusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: OpenCensus tracer] - -// Configuration for the OpenCensus tracer. -// [#next-free-field: 15] -// [#extension: envoy.tracers.opencensus] -message OpenCensusConfig { - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - - reserved 7; - - // Configures tracing, e.g. the sampler, max number of annotations, etc. - opencensus.proto.trace.v1.TraceConfig trace_config = 1; - - // Enables the stdout exporter if set to true. This is intended for debugging - // purposes. - bool stdout_exporter_enabled = 2; - - // Enables the Stackdriver exporter if set to true. The project_id must also - // be set. - bool stackdriver_exporter_enabled = 3; - - // The Cloud project_id to use for Stackdriver tracing. - string stackdriver_project_id = 4; - - // (optional) By default, the Stackdriver exporter will connect to production - // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect - // to this address, which is in the gRPC format: - // https://github.com/grpc/grpc/blob/master/doc/naming.md - string stackdriver_address = 10; - - // (optional) The gRPC server that hosts Stackdriver tracing service. Only - // Google gRPC is supported. If :ref:`target_uri ` - // is not provided, the default production Stackdriver address will be used. - api.v2.core.GrpcService stackdriver_grpc_service = 13; - - // Enables the Zipkin exporter if set to true. The url and service name must - // also be set. - bool zipkin_exporter_enabled = 5; - - // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" - string zipkin_url = 6; - - // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or - // ocagent_grpc_service must also be set. - bool ocagent_exporter_enabled = 11; - - // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - // format: https://github.com/grpc/grpc/blob/master/doc/naming.md - // [#comment:TODO: deprecate this field] - string ocagent_address = 12; - - // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. - // This is only used if the ocagent_address is left empty. - api.v2.core.GrpcService ocagent_grpc_service = 14; - - // List of incoming trace context headers we will accept. First one found - // wins. - repeated TraceContext incoming_trace_context = 8; - - // List of outgoing trace context headers we will produce. 
- repeated TraceContext outgoing_trace_context = 9; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/service.proto b/generated_api_shadow/envoy/config/trace/v2/service.proto deleted file mode 100644 index d102499b6261a..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/service.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "ServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Trace Service] - -// Configuration structure. -message TraceServiceConfig { - // The upstream gRPC cluster that hosts the metrics service. - api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/trace.proto b/generated_api_shadow/envoy/config/trace/v2/trace.proto deleted file mode 100644 index 6ed394147db10..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/trace.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import public "envoy/config/trace/v2/datadog.proto"; -import public "envoy/config/trace/v2/dynamic_ot.proto"; -import public "envoy/config/trace/v2/http_tracer.proto"; -import public "envoy/config/trace/v2/lightstep.proto"; -import public "envoy/config/trace/v2/opencensus.proto"; -import public "envoy/config/trace/v2/service.proto"; -import public "envoy/config/trace/v2/zipkin.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "TraceProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/config/trace/v2/zipkin.proto b/generated_api_shadow/envoy/config/trace/v2/zipkin.proto deleted 
file mode 100644 index a825d85bb7f94..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/zipkin.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "ZipkinProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Zipkin tracer] - -// Configuration for the Zipkin tracer. -// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 6] -message ZipkinConfig { - // Available Zipkin collector endpoint versions. - enum CollectorEndpointVersion { - // Zipkin API v1, JSON over HTTP. - // [#comment: The default implementation of Zipkin client before this field is added was only v1 - // and the way user configure this was by not explicitly specifying the version. Consequently, - // before this is added, the corresponding Zipkin collector expected to receive v1 payload. - // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when - // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, - // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - HTTP_JSON_V1 = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // Zipkin API v2, JSON over HTTP. - HTTP_JSON = 1; - - // Zipkin API v2, protobuf over HTTP. - HTTP_PROTO = 2; - - // [#not-implemented-hide:] - GRPC = 3; - } - - // The cluster manager cluster that hosts the Zipkin collectors. Note that the - // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster - // resources `. 
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. - google.protobuf.BoolValue shared_span_context = 4; - - // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be - // used. - CollectorEndpointVersion collector_endpoint_version = 5; -} diff --git a/generated_api_shadow/envoy/config/trace/v2alpha/BUILD b/generated_api_shadow/envoy/config/trace/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto b/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto deleted file mode 100644 index 27db3ba40b724..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2alpha; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2alpha"; -option java_outer_classname = "XrayProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: AWS X-Ray Tracer Configuration] -// Configuration for AWS X-Ray tracer - -message XRayConfig { - // The UDP endpoint of the X-Ray Daemon where the spans will be sent. - // If this value is not set, the default value of 127.0.0.1:2000 will be used. - api.v2.core.SocketAddress daemon_endpoint = 1; - - // The name of the X-Ray segment. - string segment_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The location of a local custom sampling rules JSON file. - // For an example of the sampling rules see: - // `X-Ray SDK documentation - // `_ - api.v2.core.DataSource sampling_rule_manifest = 3; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/BUILD b/generated_api_shadow/envoy/config/trace/v3/BUILD deleted file mode 100644 index ec0d9dd6a65ba..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/config/trace/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v3/datadog.proto b/generated_api_shadow/envoy/config/trace/v3/datadog.proto deleted file mode 100644 index c101ab2f03c9a..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/datadog.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "DatadogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.datadog.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Datadog tracer] - -// Configuration for the Datadog tracer. -// [#extension: envoy.tracers.datadog] -message DatadogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.DatadogConfig"; - - // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The name used for the service when traces are generated by envoy. 
- string service_name = 2 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto b/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto deleted file mode 100644 index c281068715428..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "DynamicOtProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.dynamic_ot.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamically loadable OpenTracing tracer] - -// DynamicOtConfig is used to dynamically load a tracer from a shared library -// that implements the `OpenTracing dynamic loading API -// `_. -// [#extension: envoy.tracers.dynamic_ot] -message DynamicOtConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.DynamicOtConfig"; - - // Dynamic library implementing the `OpenTracing API - // `_. - string library = 1 [(validate.rules).string = {min_len: 1}]; - - // The configuration to use when creating a tracer from the given dynamic - // library. 
- google.protobuf.Struct config = 2; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto deleted file mode 100644 index 5ec74646e79be..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "HttpTracerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - -// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. -// -// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one -// supported. -// -// .. attention:: -// -// Use of this message type has been deprecated in favor of direct use of -// :ref:`Tracing.Http `. -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.Tracing"; - - // Configuration for an HTTP tracer provider used by Envoy. - // - // The configuration is defined by the - // :ref:`HttpConnectionManager.Tracing ` - // :ref:`provider ` - // field. - message Http { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.Tracing.Http"; - - // The name of the HTTP trace driver to instantiate. The name must match a - // supported HTTP trace driver. - // See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Trace driver specific configuration which must be set according to the driver being instantiated. - // [#extension-category: envoy.tracers] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - // Provides configuration for the HTTP tracer. - Http http = 1; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/lightstep.proto b/generated_api_shadow/envoy/config/trace/v3/lightstep.proto deleted file mode 100644 index b5cff53fea96a..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/lightstep.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/base.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "LightstepProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.lightstep.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: LightStep tracer] - -// Configuration for the LightStep tracer. -// [#extension: envoy.tracers.lightstep] -message LightstepConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.LightstepConfig"; - - // Available propagation modes - enum PropagationMode { - // Propagate trace context in the single header x-ot-span-context. - ENVOY = 0; - - // Propagate trace context using LightStep's native format. - LIGHTSTEP = 1; - - // Propagate trace context using the b3 format. 
- B3 = 2; - - // Propagation trace context using the w3 trace-context standard. - TRACE_CONTEXT = 3; - } - - // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // File containing the access token to the `LightStep - // `_ API. - string access_token_file = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access token to the `LightStep `_ API. - core.v3.DataSource access_token = 4; - - // Propagation modes to use by LightStep's tracer. - repeated PropagationMode propagation_modes = 3 - [(validate.rules).repeated = {items {enum {defined_only: true}}}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/opencensus.proto b/generated_api_shadow/envoy/config/trace/v3/opencensus.proto deleted file mode 100644 index ee2241e729a81..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/opencensus.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "opencensus/proto/trace/v1/trace_config.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "OpencensusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.opencensus.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OpenCensus tracer] - -// Configuration for the OpenCensus tracer. 
-// [#next-free-field: 15] -// [#extension: envoy.tracers.opencensus] -message OpenCensusConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.OpenCensusConfig"; - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - - reserved 7; - - // Configures tracing, e.g. the sampler, max number of annotations, etc. - opencensus.proto.trace.v1.TraceConfig trace_config = 1; - - // Enables the stdout exporter if set to true. This is intended for debugging - // purposes. - bool stdout_exporter_enabled = 2; - - // Enables the Stackdriver exporter if set to true. The project_id must also - // be set. - bool stackdriver_exporter_enabled = 3; - - // The Cloud project_id to use for Stackdriver tracing. - string stackdriver_project_id = 4; - - // (optional) By default, the Stackdriver exporter will connect to production - // Stackdriver. If stackdriver_address is non-empty, it will instead connect - // to this address, which is in the gRPC format: - // https://github.com/grpc/grpc/blob/master/doc/naming.md - string stackdriver_address = 10; - - // (optional) The gRPC server that hosts Stackdriver tracing service. Only - // Google gRPC is supported. If :ref:`target_uri ` - // is not provided, the default production Stackdriver address will be used. - core.v3.GrpcService stackdriver_grpc_service = 13; - - // Enables the Zipkin exporter if set to true. The url and service name must - // also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin - // tracer `. - bool zipkin_exporter_enabled = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". 
This is - // deprecated, prefer to use Envoy's :ref:`native Zipkin tracer - // `. - string zipkin_url = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or - // ocagent_grpc_service must also be set. - bool ocagent_exporter_enabled = 11; - - // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - // format: https://github.com/grpc/grpc/blob/master/doc/naming.md - // [#comment:TODO: deprecate this field] - string ocagent_address = 12; - - // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. - // This is only used if the ocagent_address is left empty. - core.v3.GrpcService ocagent_grpc_service = 14; - - // List of incoming trace context headers we will accept. First one found - // wins. - repeated TraceContext incoming_trace_context = 8; - - // List of outgoing trace context headers we will produce. - repeated TraceContext outgoing_trace_context = 9; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/service.proto b/generated_api_shadow/envoy/config/trace/v3/service.proto deleted file mode 100644 index 1e01ff61847f0..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/service.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "ServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Trace Service] - -// Configuration structure. 
-message TraceServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.TraceServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/skywalking.proto b/generated_api_shadow/envoy/config/trace/v3/skywalking.proto deleted file mode 100644 index 3961a9e4db860..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/skywalking.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "SkywalkingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.skywalking.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SkyWalking tracer] - -// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the -// provider of http tracer, then -// :ref:`start_child_span ` -// in the router must be set to true to get the correct topology and tracing data. Moreover, SkyWalking -// Tracer does not support SkyWalking extension header (``sw8-x``) temporarily. -// [#extension: envoy.tracers.skywalking] -message SkyWalkingConfig { - // SkyWalking collector service. - core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - ClientConfig client_config = 2; -} - -// Client config for SkyWalking tracer. -message ClientConfig { - // Service name for SkyWalking tracer. 
If this field is empty, then local service cluster name - // that configured by :ref:`Bootstrap node ` - // message's :ref:`cluster ` field or command line - // option :option:`--service-cluster` will be used. If both this field and local service cluster - // name are empty, ``EnvoyProxy`` is used as the service name by default. - string service_name = 1; - - // Service instance name for SkyWalking tracer. If this field is empty, then local service node - // that configured by :ref:`Bootstrap node ` - // message's :ref:`id ` field or command line option - // :option:`--service-node` will be used. If both this field and local service node are empty, - // ``EnvoyProxy`` is used as the instance name by default. - string instance_name = 2; - - // Authentication token config for SkyWalking. SkyWalking can use token authentication to secure - // that monitoring application data can be trusted. In current version, Token is considered as a - // simple string. - // [#comment:TODO(wbpcode): Get backend token through the SDS API.] - oneof backend_token_specifier { - // Inline authentication token string. - string backend_token = 3 [(udpa.annotations.sensitive) = true]; - } - - // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. - // This field specifies the maximum number of segments that can be cached. If not specified, the - // default is 1024. 
- google.protobuf.UInt32Value max_cache_size = 4; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/trace.proto b/generated_api_shadow/envoy/config/trace/v3/trace.proto deleted file mode 100644 index 472e38b5abb8d..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/trace.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import public "envoy/config/trace/v3/datadog.proto"; -import public "envoy/config/trace/v3/dynamic_ot.proto"; -import public "envoy/config/trace/v3/http_tracer.proto"; -import public "envoy/config/trace/v3/lightstep.proto"; -import public "envoy/config/trace/v3/opencensus.proto"; -import public "envoy/config/trace/v3/service.proto"; -import public "envoy/config/trace/v3/zipkin.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "TraceProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/config/trace/v3/xray.proto b/generated_api_shadow/envoy/config/trace/v3/xray.proto deleted file mode 100644 index 208170b60c3f7..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/xray.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "XrayProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.xray.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: AWS X-Ray Tracer Configuration] -// Configuration for AWS X-Ray tracer - -// [#extension: envoy.tracers.xray] 
-message XRayConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2alpha.XRayConfig"; - - message SegmentFields { - // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". - string origin = 1; - - // AWS resource metadata dictionary. - // See: `X-Ray Segment Document documentation `__ - google.protobuf.Struct aws = 2; - } - - // The UDP endpoint of the X-Ray Daemon where the spans will be sent. - // If this value is not set, the default value of 127.0.0.1:2000 will be used. - core.v3.SocketAddress daemon_endpoint = 1; - - // The name of the X-Ray segment. - string segment_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The location of a local custom sampling rules JSON file. - // For an example of the sampling rules see: - // `X-Ray SDK documentation - // `_ - core.v3.DataSource sampling_rule_manifest = 3; - - // Optional custom fields to be added to each trace segment. - // see: `X-Ray Segment Document documentation - // `__ - SegmentFields segment_fields = 4; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto b/generated_api_shadow/envoy/config/trace/v3/zipkin.proto deleted file mode 100644 index 42e46ed69c649..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto +++ /dev/null @@ -1,73 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "ZipkinProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.zipkin.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Zipkin tracer] - -// Configuration 
for the Zipkin tracer. -// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 7] -message ZipkinConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.ZipkinConfig"; - - // Available Zipkin collector endpoint versions. - enum CollectorEndpointVersion { - // Zipkin API v1, JSON over HTTP. - // [#comment: The default implementation of Zipkin client before this field is added was only v1 - // and the way user configure this was by not explicitly specifying the version. Consequently, - // before this is added, the corresponding Zipkin collector expected to receive v1 payload. - // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when - // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, - // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - hidden_envoy_deprecated_HTTP_JSON_V1 = 0 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - - // Zipkin API v2, JSON over HTTP. - HTTP_JSON = 1; - - // Zipkin API v2, protobuf over HTTP. - HTTP_PROTO = 2; - - // [#not-implemented-hide:] - GRPC = 3; - } - - // The cluster manager cluster that hosts the Zipkin collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation. - string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. 
- google.protobuf.BoolValue shared_span_context = 4; - - // Determines the selected collector endpoint version. - CollectorEndpointVersion collector_endpoint_version = 5; - - // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors - // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. - string collector_hostname = 6; -} diff --git a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD b/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto b/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto deleted file mode 100644 index 92d5fb83a49cd..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.transport_socket.alts.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha"; -option java_outer_classname = "AltsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.alts.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: ALTS] -// [#extension: envoy.transport_sockets.alts] - -// Configuration for ALTS transport socket. 
This provides Google's ALTS protocol to Envoy. -// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ -message Alts { - // The location of a handshaker service, this is usually 169.254.169.254:8080 - // on GCE. - string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The acceptable service accounts from peer, peers not in the list will be rejected in the - // handshake validation step. If empty, no validation will be performed. - repeated string peer_service_accounts = 2; -} diff --git a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD b/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto b/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto deleted file mode 100644 index 1b3fd395d5724..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.transport_socket.raw_buffer.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2"; -option java_outer_classname = "RawBufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.raw_buffer.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Raw Buffer] -// [#extension: envoy.transport_sockets.raw_buffer] - -// Configuration for raw buffer transport socket. -message RawBuffer { -} diff --git a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD deleted file mode 100644 index 52ca9859536e8..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/config/common/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto deleted file mode 100644 index 0802c7558ad35..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.config.transport_socket.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/config/common/tap/v2alpha/common.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tap.v3"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap] -// [#extension: envoy.transport_sockets.tap] - 
-// Configuration for tap transport socket. This wraps another transport socket, providing the -// ability to interpose and record in plain text any traffic that is surfaced to Envoy. -message Tap { - // Common configuration for the tap transport socket. - common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // The underlying transport socket being wrapped. - api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/data/accesslog/v2/BUILD b/generated_api_shadow/envoy/data/accesslog/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto deleted file mode 100644 index af19197f62a6a..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto +++ /dev/null @@ -1,378 +0,0 @@ -syntax = "proto3"; - -package envoy.data.accesslog.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.accesslog.v2"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// 
[#protodoc-title: gRPC access logs] -// Envoy access logs describe incoming interaction with Envoy over a fixed -// period of time, and typically cover a single request/response exchange, -// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). -// Access logs contain fields defined in protocol-specific protobuf messages. -// -// Except where explicitly declared otherwise, all fields describe -// *downstream* interaction between Envoy and a connected client. -// Fields describing *upstream* interaction will explicitly include ``upstream`` -// in their name. - -message TCPAccessLogEntry { - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - // Properties of the TCP connection. - ConnectionProperties connection_properties = 2; -} - -message HTTPAccessLogEntry { - // HTTP version - enum HTTPVersion { - PROTOCOL_UNSPECIFIED = 0; - HTTP10 = 1; - HTTP11 = 2; - HTTP2 = 3; - HTTP3 = 4; - } - - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - HTTPVersion protocol_version = 2; - - // Description of the incoming HTTP request. - HTTPRequestProperties request = 3; - - // Description of the outgoing HTTP response. - HTTPResponseProperties response = 4; -} - -// Defines fields for a connection -message ConnectionProperties { - // Number of bytes received from downstream. - uint64 received_bytes = 1; - - // Number of bytes sent to downstream. - uint64 sent_bytes = 2; -} - -// Defines fields that are shared by all Envoy access logs. -// [#next-free-field: 22] -message AccessLogCommon { - // [#not-implemented-hide:] - // This field indicates the rate at which this log entry was sampled. - // Valid range is (0.0, 1.0]. - double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; - - // This field is the remote/origin address on which the request from the user was received. - // Note: This may not be the physical peer. 
E.g, if the remote address is inferred from for - // example the x-forwarder-for header, proxy protocol, etc. - api.v2.core.Address downstream_remote_address = 2; - - // This field is the local/destination address on which the request from the user was received. - api.v2.core.Address downstream_local_address = 3; - - // If the connection is secure,S this field will contain TLS properties. - TLSProperties tls_properties = 4; - - // The time that Envoy started servicing this request. This is effectively the time that the first - // downstream byte is received. - google.protobuf.Timestamp start_time = 5; - - // Interval between the first downstream byte received and the last - // downstream byte received (i.e. time it takes to receive a request). - google.protobuf.Duration time_to_last_rx_byte = 6; - - // Interval between the first downstream byte received and the first upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_upstream_tx_byte = 7; - - // Interval between the first downstream byte received and the last upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_last_upstream_tx_byte = 8; - - // Interval between the first downstream byte received and the first upstream - // byte received (i.e. time it takes to start receiving a response). - google.protobuf.Duration time_to_first_upstream_rx_byte = 9; - - // Interval between the first downstream byte received and the last upstream - // byte received (i.e. time it takes to receive a complete response). 
- google.protobuf.Duration time_to_last_upstream_rx_byte = 10; - - // Interval between the first downstream byte received and the first downstream byte sent. - // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field - // due to filters. Additionally, the same caveats apply as documented in - // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_downstream_tx_byte = 11; - - // Interval between the first downstream byte received and the last downstream byte sent. - // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta - // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate - // time. In the current implementation it does not include kernel socket buffer time. In the - // current implementation it also does not include send window buffering inside the HTTP/2 codec. - // In the future it is likely that work will be done to make this duration more accurate. - google.protobuf.Duration time_to_last_downstream_tx_byte = 12; - - // The upstream remote/destination address that handles this exchange. This does not include - // retries. - api.v2.core.Address upstream_remote_address = 13; - - // The upstream local/origin address that handles this exchange. This does not include retries. - api.v2.core.Address upstream_local_address = 14; - - // The upstream cluster that *upstream_remote_address* belongs to. - string upstream_cluster = 15; - - // Flags indicating occurrences during request/response processing. - ResponseFlags response_flags = 16; - - // All metadata encountered during request processing, including endpoint - // selection. - // - // This can be used to associate IDs attached to the various configurations - // used to process this request with the access log entry. 
For example, a - // route created from a higher level forwarding rule with some ID can place - // that ID in this field and cross reference later. It can also be used to - // determine if a canary endpoint was used or not. - api.v2.core.Metadata metadata = 17; - - // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the - // failure reason from the transport socket. The format of this field depends on the configured - // upstream transport socket. Common TLS failures are in - // :ref:`TLS trouble shooting `. - string upstream_transport_failure_reason = 18; - - // The name of the route - string route_name = 19; - - // This field is the downstream direct remote address on which the request from the user was - // received. Note: This is always the physical peer, even if the remote address is inferred from - // for example the x-forwarder-for header, proxy protocol, etc. - api.v2.core.Address downstream_direct_remote_address = 20; - - // Map of filter state in stream info that have been configured to be logged. If the filter - // state serialized to any message other than `google.protobuf.Any` it will be packed into - // `google.protobuf.Any`. - map filter_state_objects = 21; -} - -// Flags indicating occurrences during request/response processing. -// [#next-free-field: 20] -message ResponseFlags { - message Unauthorized { - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - - // The request was denied by the external authorization service. - EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - - // Indicates local server healthcheck failed. - bool failed_local_healthcheck = 1; - - // Indicates there was no healthy upstream. - bool no_healthy_upstream = 2; - - // Indicates an there was an upstream request timeout. - bool upstream_request_timeout = 3; - - // Indicates local codec level reset was sent on the stream. 
- bool local_reset = 4; - - // Indicates remote codec level reset was received on the stream. - bool upstream_remote_reset = 5; - - // Indicates there was a local reset by a connection pool due to an initial connection failure. - bool upstream_connection_failure = 6; - - // Indicates the stream was reset due to an upstream connection termination. - bool upstream_connection_termination = 7; - - // Indicates the stream was reset because of a resource overflow. - bool upstream_overflow = 8; - - // Indicates no route was found for the request. - bool no_route_found = 9; - - // Indicates that the request was delayed before proxying. - bool delay_injected = 10; - - // Indicates that the request was aborted with an injected error code. - bool fault_injected = 11; - - // Indicates that the request was rate-limited locally. - bool rate_limited = 12; - - // Indicates if the request was deemed unauthorized and the reason for it. - Unauthorized unauthorized_details = 13; - - // Indicates that the request was rejected because there was an error in rate limit service. - bool rate_limit_service_error = 14; - - // Indicates the stream was reset due to a downstream connection termination. - bool downstream_connection_termination = 15; - - // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. - bool upstream_retry_limit_exceeded = 16; - - // Indicates that the stream idle timeout was hit, resulting in a downstream 408. - bool stream_idle_timeout = 17; - - // Indicates that the request was rejected because an envoy request header failed strict - // validation. - bool invalid_envoy_request_headers = 18; - - // Indicates there was an HTTP protocol error on the downstream request. - bool downstream_protocol_error = 19; -} - -// Properties of a negotiated TLS connection. 
-// [#next-free-field: 7] -message TLSProperties { - enum TLSVersion { - VERSION_UNSPECIFIED = 0; - TLSv1 = 1; - TLSv1_1 = 2; - TLSv1_2 = 3; - TLSv1_3 = 4; - } - - message CertificateProperties { - message SubjectAltName { - oneof san { - string uri = 1; - - // [#not-implemented-hide:] - string dns = 2; - } - } - - // SANs present in the certificate. - repeated SubjectAltName subject_alt_name = 1; - - // The subject field of the certificate. - string subject = 2; - } - - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; - - // Properties of the local certificate used to negotiate TLS. - CertificateProperties local_certificate_properties = 4; - - // Properties of the peer certificate used to negotiate TLS. - CertificateProperties peer_certificate_properties = 5; - - // The TLS session ID. - string tls_session_id = 6; -} - -// [#next-free-field: 14] -message HTTPRequestProperties { - // The request method (RFC 7231/2616). - api.v2.core.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}]; - - // The scheme portion of the incoming request URI. - string scheme = 2; - - // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. - string authority = 3; - - // The port of the incoming request URI - // (unused currently, as port is composed onto authority). - google.protobuf.UInt32Value port = 4; - - // The path portion from the incoming request URI. - string path = 5; - - // Value of the ``User-Agent`` request header. - string user_agent = 6; - - // Value of the ``Referer`` request header. - string referer = 7; - - // Value of the ``X-Forwarded-For`` request header. 
- string forwarded_for = 8; - - // Value of the ``X-Request-Id`` request header - // - // This header is used by Envoy to uniquely identify a request. - // It will be generated for all external requests and internal requests that - // do not already have a request ID. - string request_id = 9; - - // Value of the ``X-Envoy-Original-Path`` request header. - string original_path = 10; - - // Size of the HTTP request headers in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_headers_bytes = 11; - - // Size of the HTTP request body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_body_bytes = 12; - - // Map of additional headers that have been configured to be logged. - map request_headers = 13; -} - -// [#next-free-field: 7] -message HTTPResponseProperties { - // The HTTP response code returned by Envoy. - google.protobuf.UInt32Value response_code = 1; - - // Size of the HTTP response headers in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_headers_bytes = 2; - - // Size of the HTTP response body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_body_bytes = 3; - - // Map of additional headers configured to be logged. - map response_headers = 4; - - // Map of trailers configured to be logged. - map response_trailers = 5; - - // The HTTP response code details. 
- string response_code_details = 6; -} diff --git a/generated_api_shadow/envoy/data/accesslog/v3/BUILD b/generated_api_shadow/envoy/data/accesslog/v3/BUILD deleted file mode 100644 index 9065b1b5c331e..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto deleted file mode 100644 index c53ae0d6ab852..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ /dev/null @@ -1,433 +0,0 @@ -syntax = "proto3"; - -package envoy.data.accesslog.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.accesslog.v3"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC access logs] -// Envoy access logs describe incoming interaction with Envoy over a fixed -// period of time, and typically cover a single request/response exchange, -// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). -// Access logs contain fields defined in protocol-specific protobuf messages. 
-// -// Except where explicitly declared otherwise, all fields describe -// *downstream* interaction between Envoy and a connected client. -// Fields describing *upstream* interaction will explicitly include ``upstream`` -// in their name. - -message TCPAccessLogEntry { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TCPAccessLogEntry"; - - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - // Properties of the TCP connection. - ConnectionProperties connection_properties = 2; -} - -message HTTPAccessLogEntry { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.HTTPAccessLogEntry"; - - // HTTP version - enum HTTPVersion { - PROTOCOL_UNSPECIFIED = 0; - HTTP10 = 1; - HTTP11 = 2; - HTTP2 = 3; - HTTP3 = 4; - } - - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - HTTPVersion protocol_version = 2; - - // Description of the incoming HTTP request. - HTTPRequestProperties request = 3; - - // Description of the outgoing HTTP response. - HTTPResponseProperties response = 4; -} - -// Defines fields for a connection -message ConnectionProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.ConnectionProperties"; - - // Number of bytes received from downstream. - uint64 received_bytes = 1; - - // Number of bytes sent to downstream. - uint64 sent_bytes = 2; -} - -// Defines fields that are shared by all Envoy access logs. -// [#next-free-field: 22] -message AccessLogCommon { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.AccessLogCommon"; - - // [#not-implemented-hide:] - // This field indicates the rate at which this log entry was sampled. - // Valid range is (0.0, 1.0]. 
- double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; - - // This field is the remote/origin address on which the request from the user was received. - // Note: This may not be the physical peer. E.g, if the remote address is inferred from for - // example the x-forwarder-for header, proxy protocol, etc. - config.core.v3.Address downstream_remote_address = 2; - - // This field is the local/destination address on which the request from the user was received. - config.core.v3.Address downstream_local_address = 3; - - // If the connection is secure,S this field will contain TLS properties. - TLSProperties tls_properties = 4; - - // The time that Envoy started servicing this request. This is effectively the time that the first - // downstream byte is received. - google.protobuf.Timestamp start_time = 5; - - // Interval between the first downstream byte received and the last - // downstream byte received (i.e. time it takes to receive a request). - google.protobuf.Duration time_to_last_rx_byte = 6; - - // Interval between the first downstream byte received and the first upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_upstream_tx_byte = 7; - - // Interval between the first downstream byte received and the last upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_last_upstream_tx_byte = 8; - - // Interval between the first downstream byte received and the first upstream - // byte received (i.e. time it takes to start receiving a response). 
- google.protobuf.Duration time_to_first_upstream_rx_byte = 9; - - // Interval between the first downstream byte received and the last upstream - // byte received (i.e. time it takes to receive a complete response). - google.protobuf.Duration time_to_last_upstream_rx_byte = 10; - - // Interval between the first downstream byte received and the first downstream byte sent. - // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field - // due to filters. Additionally, the same caveats apply as documented in - // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_downstream_tx_byte = 11; - - // Interval between the first downstream byte received and the last downstream byte sent. - // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta - // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate - // time. In the current implementation it does not include kernel socket buffer time. In the - // current implementation it also does not include send window buffering inside the HTTP/2 codec. - // In the future it is likely that work will be done to make this duration more accurate. - google.protobuf.Duration time_to_last_downstream_tx_byte = 12; - - // The upstream remote/destination address that handles this exchange. This does not include - // retries. - config.core.v3.Address upstream_remote_address = 13; - - // The upstream local/origin address that handles this exchange. This does not include retries. - config.core.v3.Address upstream_local_address = 14; - - // The upstream cluster that *upstream_remote_address* belongs to. - string upstream_cluster = 15; - - // Flags indicating occurrences during request/response processing. - ResponseFlags response_flags = 16; - - // All metadata encountered during request processing, including endpoint - // selection. 
- // - // This can be used to associate IDs attached to the various configurations - // used to process this request with the access log entry. For example, a - // route created from a higher level forwarding rule with some ID can place - // that ID in this field and cross reference later. It can also be used to - // determine if a canary endpoint was used or not. - config.core.v3.Metadata metadata = 17; - - // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the - // failure reason from the transport socket. The format of this field depends on the configured - // upstream transport socket. Common TLS failures are in - // :ref:`TLS trouble shooting `. - string upstream_transport_failure_reason = 18; - - // The name of the route - string route_name = 19; - - // This field is the downstream direct remote address on which the request from the user was - // received. Note: This is always the physical peer, even if the remote address is inferred from - // for example the x-forwarder-for header, proxy protocol, etc. - config.core.v3.Address downstream_direct_remote_address = 20; - - // Map of filter state in stream info that have been configured to be logged. If the filter - // state serialized to any message other than `google.protobuf.Any` it will be packed into - // `google.protobuf.Any`. - map filter_state_objects = 21; -} - -// Flags indicating occurrences during request/response processing. -// [#next-free-field: 27] -message ResponseFlags { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.ResponseFlags"; - - message Unauthorized { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.ResponseFlags.Unauthorized"; - - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - - // The request was denied by the external authorization service. 
- EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - - // Indicates local server healthcheck failed. - bool failed_local_healthcheck = 1; - - // Indicates there was no healthy upstream. - bool no_healthy_upstream = 2; - - // Indicates an there was an upstream request timeout. - bool upstream_request_timeout = 3; - - // Indicates local codec level reset was sent on the stream. - bool local_reset = 4; - - // Indicates remote codec level reset was received on the stream. - bool upstream_remote_reset = 5; - - // Indicates there was a local reset by a connection pool due to an initial connection failure. - bool upstream_connection_failure = 6; - - // Indicates the stream was reset due to an upstream connection termination. - bool upstream_connection_termination = 7; - - // Indicates the stream was reset because of a resource overflow. - bool upstream_overflow = 8; - - // Indicates no route was found for the request. - bool no_route_found = 9; - - // Indicates that the request was delayed before proxying. - bool delay_injected = 10; - - // Indicates that the request was aborted with an injected error code. - bool fault_injected = 11; - - // Indicates that the request was rate-limited locally. - bool rate_limited = 12; - - // Indicates if the request was deemed unauthorized and the reason for it. - Unauthorized unauthorized_details = 13; - - // Indicates that the request was rejected because there was an error in rate limit service. - bool rate_limit_service_error = 14; - - // Indicates the stream was reset due to a downstream connection termination. - bool downstream_connection_termination = 15; - - // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. - bool upstream_retry_limit_exceeded = 16; - - // Indicates that the stream idle timeout was hit, resulting in a downstream 408. - bool stream_idle_timeout = 17; - - // Indicates that the request was rejected because an envoy request header failed strict - // validation. 
- bool invalid_envoy_request_headers = 18; - - // Indicates there was an HTTP protocol error on the downstream request. - bool downstream_protocol_error = 19; - - // Indicates there was a max stream duration reached on the upstream request. - bool upstream_max_stream_duration_reached = 20; - - // Indicates the response was served from a cache filter. - bool response_from_cache_filter = 21; - - // Indicates that a filter configuration is not available. - bool no_filter_config_found = 22; - - // Indicates that request or connection exceeded the downstream connection duration. - bool duration_timeout = 23; - - // Indicates there was an HTTP protocol error in the upstream response. - bool upstream_protocol_error = 24; - - // Indicates no cluster was found for the request. - bool no_cluster_found = 25; - - // Indicates overload manager terminated the request. - bool overload_manager = 26; -} - -// Properties of a negotiated TLS connection. -// [#next-free-field: 7] -message TLSProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TLSProperties"; - - enum TLSVersion { - VERSION_UNSPECIFIED = 0; - TLSv1 = 1; - TLSv1_1 = 2; - TLSv1_2 = 3; - TLSv1_3 = 4; - } - - message CertificateProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TLSProperties.CertificateProperties"; - - message SubjectAltName { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TLSProperties.CertificateProperties.SubjectAltName"; - - oneof san { - string uri = 1; - - // [#not-implemented-hide:] - string dns = 2; - } - } - - // SANs present in the certificate. - repeated SubjectAltName subject_alt_name = 1; - - // The subject field of the certificate. - string subject = 2; - } - - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. 
The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; - - // Properties of the local certificate used to negotiate TLS. - CertificateProperties local_certificate_properties = 4; - - // Properties of the peer certificate used to negotiate TLS. - CertificateProperties peer_certificate_properties = 5; - - // The TLS session ID. - string tls_session_id = 6; -} - -// [#next-free-field: 14] -message HTTPRequestProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.HTTPRequestProperties"; - - // The request method (RFC 7231/2616). - config.core.v3.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}]; - - // The scheme portion of the incoming request URI. - string scheme = 2; - - // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. - string authority = 3; - - // The port of the incoming request URI - // (unused currently, as port is composed onto authority). - google.protobuf.UInt32Value port = 4; - - // The path portion from the incoming request URI. - string path = 5; - - // Value of the ``User-Agent`` request header. - string user_agent = 6; - - // Value of the ``Referer`` request header. - string referer = 7; - - // Value of the ``X-Forwarded-For`` request header. - string forwarded_for = 8; - - // Value of the ``X-Request-Id`` request header - // - // This header is used by Envoy to uniquely identify a request. - // It will be generated for all external requests and internal requests that - // do not already have a request ID. - string request_id = 9; - - // Value of the ``X-Envoy-Original-Path`` request header. - string original_path = 10; - - // Size of the HTTP request headers in bytes. 
- // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_headers_bytes = 11; - - // Size of the HTTP request body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_body_bytes = 12; - - // Map of additional headers that have been configured to be logged. - map request_headers = 13; -} - -// [#next-free-field: 7] -message HTTPResponseProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.HTTPResponseProperties"; - - // The HTTP response code returned by Envoy. - google.protobuf.UInt32Value response_code = 1; - - // Size of the HTTP response headers in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_headers_bytes = 2; - - // Size of the HTTP response body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_body_bytes = 3; - - // Map of additional headers configured to be logged. - map response_headers = 4; - - // Map of trailers configured to be logged. - map response_trailers = 5; - - // The HTTP response code details. - string response_code_details = 6; -} diff --git a/generated_api_shadow/envoy/data/cluster/v2alpha/BUILD b/generated_api_shadow/envoy/data/cluster/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto deleted file mode 100644 index 3ea8bc2597fd8..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto +++ /dev/null @@ -1,135 +0,0 @@ -syntax = "proto3"; - -package envoy.data.cluster.v2alpha; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha"; -option java_outer_classname = "OutlierDetectionEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.data.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Outlier detection logging events] -// :ref:`Outlier detection logging `. - -// Type of ejection that took place -enum OutlierEjectionType { - // In case upstream host returns certain number of consecutive 5xx. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all type of errors are treated as HTTP 5xx errors. - // See :ref:`Cluster outlier detection ` documentation for - // details. - CONSECUTIVE_5XX = 0; - - // In case upstream host returns certain number of consecutive gateway errors - CONSECUTIVE_GATEWAY_FAILURE = 1; - - // Runs over aggregated success rate statistics from every host in cluster - // and selects hosts for which ratio of successful replies deviates from other hosts - // in the cluster. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors (externally and locally generated) are used to calculate success rate - // statistics. See :ref:`Cluster outlier detection ` - // documentation for details. - SUCCESS_RATE = 2; - - // Consecutive local origin failures: Connection failures, resets, timeouts, etc - // This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; - - // Runs over aggregated success rate statistics for local origin failures - // for all hosts in the cluster and selects hosts for which success rate deviates from other - // hosts in the cluster. This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - SUCCESS_RATE_LOCAL_ORIGIN = 4; - - // Runs over aggregated success rate statistics from every host in cluster and selects hosts for - // which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE = 5; - - // Runs over aggregated success rate statistics for local origin failures from every host in - // cluster and selects hosts for which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; -} - -// Represents possible action applied to upstream host -enum Action { - // In case host was excluded from service - EJECT = 0; - - // In case host was brought back into service - UNEJECT = 1; -} - -// [#next-free-field: 12] -message OutlierDetectionEvent { - // In case of eject represents type of ejection that took place. - OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Timestamp for event. 
- google.protobuf.Timestamp timestamp = 2; - - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; - - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; - - // The action that took place. - Action action = 6 [(validate.rules).enum = {defined_only: true}]; - - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. - bool enforced = 8; - - oneof event { - option (validate.required) = true; - - OutlierEjectSuccessRate eject_success_rate_event = 9; - - OutlierEjectConsecutive eject_consecutive_event = 10; - - OutlierEjectFailurePercentage eject_failure_percentage_event = 11; - } -} - -message OutlierEjectSuccessRate { - // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; - - // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 - // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; - - // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; -} - -message OutlierEjectConsecutive { -} - -message OutlierEjectFailurePercentage { - // Host's success rate at the time of the ejection event on a 0-100 range. 
- uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; -} diff --git a/generated_api_shadow/envoy/data/cluster/v3/BUILD b/generated_api_shadow/envoy/data/cluster/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto deleted file mode 100644 index 2ba29d89954bb..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto +++ /dev/null @@ -1,145 +0,0 @@ -syntax = "proto3"; - -package envoy.data.cluster.v3; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.cluster.v3"; -option java_outer_classname = "OutlierDetectionEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Outlier detection logging events] -// :ref:`Outlier detection logging `. - -// Type of ejection that took place -enum OutlierEjectionType { - // In case upstream host returns certain number of consecutive 5xx. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all type of errors are treated as HTTP 5xx errors. - // See :ref:`Cluster outlier detection ` documentation for - // details. 
- CONSECUTIVE_5XX = 0; - - // In case upstream host returns certain number of consecutive gateway errors - CONSECUTIVE_GATEWAY_FAILURE = 1; - - // Runs over aggregated success rate statistics from every host in cluster - // and selects hosts for which ratio of successful replies deviates from other hosts - // in the cluster. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors (externally and locally generated) are used to calculate success rate - // statistics. See :ref:`Cluster outlier detection ` - // documentation for details. - SUCCESS_RATE = 2; - - // Consecutive local origin failures: Connection failures, resets, timeouts, etc - // This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; - - // Runs over aggregated success rate statistics for local origin failures - // for all hosts in the cluster and selects hosts for which success rate deviates from other - // hosts in the cluster. This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - SUCCESS_RATE_LOCAL_ORIGIN = 4; - - // Runs over aggregated success rate statistics from every host in cluster and selects hosts for - // which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE = 5; - - // Runs over aggregated success rate statistics for local origin failures from every host in - // cluster and selects hosts for which ratio of failed replies is above configured value. 
- FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; -} - -// Represents possible action applied to upstream host -enum Action { - // In case host was excluded from service - EJECT = 0; - - // In case host was brought back into service - UNEJECT = 1; -} - -// [#next-free-field: 12] -message OutlierDetectionEvent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierDetectionEvent"; - - // In case of eject represents type of ejection that took place. - OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 2; - - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string = {min_len: 1}]; - - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string = {min_len: 1}]; - - // The action that took place. - Action action = 6 [(validate.rules).enum = {defined_only: true}]; - - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. 
- bool enforced = 8; - - oneof event { - option (validate.required) = true; - - OutlierEjectSuccessRate eject_success_rate_event = 9; - - OutlierEjectConsecutive eject_consecutive_event = 10; - - OutlierEjectFailurePercentage eject_failure_percentage_event = 11; - } -} - -message OutlierEjectSuccessRate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierEjectSuccessRate"; - - // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; - - // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 - // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; - - // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; -} - -message OutlierEjectConsecutive { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierEjectConsecutive"; -} - -message OutlierEjectFailurePercentage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierEjectFailurePercentage"; - - // Host's success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; -} diff --git a/generated_api_shadow/envoy/data/core/v2alpha/BUILD b/generated_api_shadow/envoy/data/core/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/data/core/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto b/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto deleted file mode 100644 index 00fd69fd42d3f..0000000000000 --- a/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto +++ /dev/null @@ -1,88 +0,0 @@ -syntax = "proto3"; - -package envoy.data.core.v2alpha; - -import "envoy/api/v2/core/address.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.core.v2alpha"; -option java_outer_classname = "HealthCheckEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health check logging events] -// :ref:`Health check logging `. - -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - -// [#next-free-field: 10] -message HealthCheckEvent { - HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; - - api.v2.core.Address host = 2; - - string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; - - oneof event { - option (validate.required) = true; - - // Host ejection. - HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - - // Host addition. - HealthCheckAddHealthy add_healthy_event = 5; - - // Host failure. - HealthCheckFailure health_check_failure_event = 7; - - // Healthy host became degraded. - DegradedHealthyHost degraded_healthy_host = 8; - - // A degraded host returned to being healthy. - NoLongerDegradedHost no_longer_degraded_host = 9; - } - - // Timestamp for event. 
- google.protobuf.Timestamp timestamp = 6; -} - -message HealthCheckEjectUnhealthy { - // The type of failure that caused this ejection. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; -} - -message HealthCheckAddHealthy { - // Whether this addition is the result of the first ever health check on a host, in which case - // the configured :ref:`healthy threshold ` - // is bypassed and the host is immediately added. - bool first_check = 1; -} - -message HealthCheckFailure { - // The type of failure that caused this event. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Whether this event is the result of the first ever health check on a host. - bool first_check = 2; -} - -message DegradedHealthyHost { -} - -message NoLongerDegradedHost { -} diff --git a/generated_api_shadow/envoy/data/core/v3/BUILD b/generated_api_shadow/envoy/data/core/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/data/core/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto deleted file mode 100644 index 92e2d68d255da..0000000000000 --- a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto +++ /dev/null @@ -1,106 +0,0 @@ -syntax = "proto3"; - -package envoy.data.core.v3; - -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.core.v3"; -option java_outer_classname = "HealthCheckEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health check logging events] -// :ref:`Health check logging `. - -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; - NETWORK_TIMEOUT = 3; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - -// [#next-free-field: 10] -message HealthCheckEvent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckEvent"; - - HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; - - config.core.v3.Address host = 2; - - string cluster_name = 3 [(validate.rules).string = {min_len: 1}]; - - oneof event { - option (validate.required) = true; - - // Host ejection. - HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - - // Host addition. - HealthCheckAddHealthy add_healthy_event = 5; - - // Host failure. - HealthCheckFailure health_check_failure_event = 7; - - // Healthy host became degraded. 
- DegradedHealthyHost degraded_healthy_host = 8; - - // A degraded host returned to being healthy. - NoLongerDegradedHost no_longer_degraded_host = 9; - } - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 6; -} - -message HealthCheckEjectUnhealthy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckEjectUnhealthy"; - - // The type of failure that caused this ejection. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; -} - -message HealthCheckAddHealthy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckAddHealthy"; - - // Whether this addition is the result of the first ever health check on a host, in which case - // the configured :ref:`healthy threshold ` - // is bypassed and the host is immediately added. - bool first_check = 1; -} - -message HealthCheckFailure { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckFailure"; - - // The type of failure that caused this event. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Whether this event is the result of the first ever health check on a host. - bool first_check = 2; -} - -message DegradedHealthyHost { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.DegradedHealthyHost"; -} - -message NoLongerDegradedHost { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.NoLongerDegradedHost"; -} diff --git a/generated_api_shadow/envoy/data/dns/v2alpha/BUILD b/generated_api_shadow/envoy/data/dns/v2alpha/BUILD deleted file mode 100644 index e305003238a56..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto deleted file mode 100644 index 7a9e535c4f3a2..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.data.dns.v2alpha; - -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.dns.v2alpha"; -option java_outer_classname = "DnsTableProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: DNS Filter Table Data] -// :ref:`DNS Filter config overview `. - -// This message contains the configuration for the DNS Filter if populated -// from the control plane -message DnsTable { - // This message contains a list of IP addresses returned for a query for a known name - message AddressList { - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. 
The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain - repeated string address = 1 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; - } - - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint - message DnsEndpoint { - oneof endpoint_config { - option (validate.required) = true; - - AddressList address_list = 1; - } - } - - message DnsVirtualDomain { - // The domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; - - // The configuration containing the method to determine the address - // of this endpoint - DnsEndpoint endpoint = 2; - - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; - } - - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; - - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; - - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server - repeated type.matcher.StringMatcher known_suffixes = 3; -} diff --git a/generated_api_shadow/envoy/data/dns/v3/BUILD b/generated_api_shadow/envoy/data/dns/v3/BUILD deleted file mode 100644 index 516369f09675b..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto deleted file mode 100644 index 5cc04440f700f..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto +++ /dev/null @@ -1,156 +0,0 @@ -syntax = "proto3"; - -package envoy.data.dns.v3; - -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.dns.v3"; -option java_outer_classname = "DnsTableProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS Filter Table Data] -// :ref:`DNS Filter config overview `. - -// This message contains the configuration for the DNS Filter if populated -// from the control plane -message DnsTable { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable"; - - // This message contains a list of IP addresses returned for a query for a known name - message AddressList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v2alpha.DnsTable.AddressList"; - - // This field contains a well formed IP address that is returned in the answer for a - // name query. The address field can be an IPv4 or IPv6 address. Address family - // detection is done automatically when Envoy parses the string. 
Since this field is - // repeated, Envoy will return as many entries from this list in the DNS response while - // keeping the response under 512 bytes - repeated string address = 1 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; - } - - // Specify the service protocol using a numeric or string value - message DnsServiceProtocol { - oneof protocol_config { - option (validate.required) = true; - - // Specify the protocol number for the service. Envoy will try to resolve the number to - // the protocol name. For example, 6 will resolve to "tcp". Refer to: - // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml - // for protocol names and numbers - uint32 number = 1 [(validate.rules).uint32 = {lt: 255}]; - - // Specify the protocol name for the service. - string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - } - - // Specify the target for a given DNS service - // [#next-free-field: 6] - message DnsServiceTarget { - // Specify the name of the endpoint for the Service. The name is a hostname or a cluster - oneof endpoint_type { - option (validate.required) = true; - - // Use a resolvable hostname as the endpoint for a service. - string host_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // Use a cluster name as the endpoint for a service. - string cluster_name = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - - // The priority of the service record target - uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}]; - - // The weight of the service record target - uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}]; - - // The port to which the service is bound. This value is optional if the target is a - // cluster. 
Setting port to zero in this case makes the filter use the port value - // from the cluster host - uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}]; - } - - // This message defines a service selection record returned for a service query in a domain - message DnsService { - // The name of the service without the protocol or domain name - string service_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The service protocol. This can be specified as a string or the numeric value of the protocol - DnsServiceProtocol protocol = 2; - - // The service entry time to live. This is independent from the DNS Answer record TTL - google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}]; - - // The list of targets hosting the service - repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}]; - } - - // Define a list of service records for a given service - message DnsServiceList { - repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message DnsEndpoint { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; - - oneof endpoint_config { - option (validate.required) = true; - - // Define a list of addresses to return for the specified endpoint - AddressList address_list = 1; - - // Define a cluster whose addresses are returned for the specified endpoint - string cluster_name = 2; - - // Define a DNS Service List for the specified endpoint - DnsServiceList service_list = 3; - } - } - - message DnsVirtualDomain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; - - // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The configuration containing the method to determine the address of this endpoint - DnsEndpoint 
endpoint = 2; - - // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; - } - - // Control how many times Envoy makes an attempt to forward a query to an external DNS server - uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - - // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this - // list empty, Envoy will forward all queries to external resolvers - repeated DnsVirtualDomain virtual_domains = 2; - - // This field is deprecated and no longer used in Envoy. The filter's behavior has changed - // internally to use a different data structure allowing the filter to determine whether a - // query is for known domain without the use of this field. - // - // This field serves to help Envoy determine whether it can authoritatively answer a query - // for a name matching a suffix in this list. If the query name does not match a suffix in - // this list, Envoy will forward the query to an upstream DNS server - repeated type.matcher.v3.StringMatcher known_suffixes = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/BUILD b/generated_api_shadow/envoy/data/tap/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/common.proto b/generated_api_shadow/envoy/data/tap/v2alpha/common.proto deleted file mode 100644 index 7c02aa7719542..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/common.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap common data] - -// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received -// and transmitted data, etc. -message Body { - oneof body_type { - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bytes as_bytes = 1; - - // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING - // ` sink - // format type is selected. See the documentation for that option for why this is useful. - string as_string = 2; - } - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. 
- bool truncated = 3; -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/http.proto b/generated_api_shadow/envoy/data/tap/v2alpha/http.proto deleted file mode 100644 index 60ea68b66d4ad..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/http.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/tap/v2alpha/common.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP tap data] - -// A fully buffered HTTP trace message. -message HttpBufferedTrace { - // HTTP message wrapper. - message Message { - // Message headers. - repeated api.v2.core.HeaderValue headers = 1; - - // Message body. - Body body = 2; - - // Message trailers. - repeated api.v2.core.HeaderValue trailers = 3; - } - - // Request message. - Message request = 1; - - // Response message. - Message response = 2; -} - -// A streamed HTTP trace segment. Multiple segments make up a full trace. -// [#next-free-field: 8] -message HttpStreamedTraceSegment { - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. - uint64 trace_id = 1; - - oneof message_piece { - // Request headers. - api.v2.core.HeaderMap request_headers = 2; - - // Request body chunk. - Body request_body_chunk = 3; - - // Request trailers. - api.v2.core.HeaderMap request_trailers = 4; - - // Response headers. - api.v2.core.HeaderMap response_headers = 5; - - // Response body chunk. - Body response_body_chunk = 6; - - // Response trailers. 
- api.v2.core.HeaderMap response_trailers = 7; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto b/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto deleted file mode 100644 index 82c2845ee338f..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "envoy/api/v2/core/address.proto"; -import "envoy/data/tap/v2alpha/common.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "TransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - -// Connection properties. -message Connection { - // Local address. - api.v2.core.Address local_address = 2; - - // Remote address. - api.v2.core.Address remote_address = 3; -} - -// Event in a socket trace. -message SocketEvent { - // Data read by Envoy from the transport socket. - message Read { - // TODO(htuch): Half-close for reads. - - // Binary data read. - Body data = 1; - } - - // Data written by Envoy to the transport socket. - message Write { - // Binary data written. - Body data = 1; - - // Stream was half closed after this write. - bool end_stream = 2; - } - - // The connection was closed. - message Closed { - // TODO(mattklein123): Close event type. - } - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - - // Read or write with content as bytes string. - oneof event_selector { - Read read = 2; - - Write write = 3; - - Closed closed = 4; - } -} - -// Sequence of read/write events that constitute a buffered trace on a socket. 
-// [#next-free-field: 6] -message SocketBufferedTrace { - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - // Connection properties. - Connection connection = 2; - - // Sequence of observed events. - repeated SocketEvent events = 3; - - // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes - // ` setting. - bool read_truncated = 4; - - // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes - // ` setting. - bool write_truncated = 5; -} - -// A streamed socket trace segment. Multiple segments make up a full trace. -message SocketStreamedTraceSegment { - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - oneof message_piece { - // Connection properties. - Connection connection = 2; - - // Socket event. - SocketEvent event = 3; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto b/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto deleted file mode 100644 index 769b95c6160a3..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "envoy/data/tap/v2alpha/http.proto"; -import "envoy/data/tap/v2alpha/transport.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "WrapperProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap data wrappers] - -// Wrapper for all fully buffered and streamed tap traces that Envoy emits. 
This is required for -// sending traces over gRPC APIs or more easily persisting binary messages to files. -message TraceWrapper { - oneof trace { - option (validate.required) = true; - - // An HTTP buffered tap trace. - HttpBufferedTrace http_buffered_trace = 1; - - // An HTTP streamed tap trace segment. - HttpStreamedTraceSegment http_streamed_trace_segment = 2; - - // A socket buffered tap trace. - SocketBufferedTrace socket_buffered_trace = 3; - - // A socket streamed tap trace segment. - SocketStreamedTraceSegment socket_streamed_trace_segment = 4; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v3/BUILD b/generated_api_shadow/envoy/data/tap/v3/BUILD deleted file mode 100644 index 7cdbc28e7cd45..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/tap/v3/common.proto b/generated_api_shadow/envoy/data/tap/v3/common.proto deleted file mode 100644 index 2c4fb9c61a555..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/common.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap common data] - -// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received -// and transmitted data, etc. 
-message Body { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body"; - - oneof body_type { - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bytes as_bytes = 1; - - // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING - // ` sink - // format type is selected. See the documentation for that option for why this is useful. - string as_string = 2; - } - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. - bool truncated = 3; -} diff --git a/generated_api_shadow/envoy/data/tap/v3/http.proto b/generated_api_shadow/envoy/data/tap/v3/http.proto deleted file mode 100644 index d4f05fa09522e..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/http.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP tap data] - -// A fully buffered HTTP trace message. -message HttpBufferedTrace { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.HttpBufferedTrace"; - - // HTTP message wrapper. - message Message { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.HttpBufferedTrace.Message"; - - // Message headers. - repeated config.core.v3.HeaderValue headers = 1; - - // Message body. - Body body = 2; - - // Message trailers. 
- repeated config.core.v3.HeaderValue trailers = 3; - } - - // Request message. - Message request = 1; - - // Response message. - Message response = 2; -} - -// A streamed HTTP trace segment. Multiple segments make up a full trace. -// [#next-free-field: 8] -message HttpStreamedTraceSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.HttpStreamedTraceSegment"; - - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. - uint64 trace_id = 1; - - oneof message_piece { - // Request headers. - config.core.v3.HeaderMap request_headers = 2; - - // Request body chunk. - Body request_body_chunk = 3; - - // Request trailers. - config.core.v3.HeaderMap request_trailers = 4; - - // Response headers. - config.core.v3.HeaderMap response_headers = 5; - - // Response body chunk. - Body response_body_chunk = 6; - - // Response trailers. - config.core.v3.HeaderMap response_trailers = 7; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v3/transport.proto b/generated_api_shadow/envoy/data/tap/v3/transport.proto deleted file mode 100644 index 0ff4b7da06043..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/transport.proto +++ /dev/null @@ -1,122 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/data/tap/v3/common.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "TransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - -// Connection properties. 
-message Connection { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Connection"; - - // Local address. - config.core.v3.Address local_address = 2; - - // Remote address. - config.core.v3.Address remote_address = 3; -} - -// Event in a socket trace. -message SocketEvent { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.SocketEvent"; - - // Data read by Envoy from the transport socket. - message Read { - // TODO(htuch): Half-close for reads. - - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketEvent.Read"; - - // Binary data read. - Body data = 1; - } - - // Data written by Envoy to the transport socket. - message Write { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketEvent.Write"; - - // Binary data written. - Body data = 1; - - // Stream was half closed after this write. - bool end_stream = 2; - } - - // The connection was closed. - message Closed { - // TODO(mattklein123): Close event type. - - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketEvent.Closed"; - } - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - - // Read or write with content as bytes string. - oneof event_selector { - Read read = 2; - - Write write = 3; - - Closed closed = 4; - } -} - -// Sequence of read/write events that constitute a buffered trace on a socket. -// [#next-free-field: 6] -message SocketBufferedTrace { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketBufferedTrace"; - - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - // Connection properties. - Connection connection = 2; - - // Sequence of observed events. 
- repeated SocketEvent events = 3; - - // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes - // ` setting. - bool read_truncated = 4; - - // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes - // ` setting. - bool write_truncated = 5; -} - -// A streamed socket trace segment. Multiple segments make up a full trace. -message SocketStreamedTraceSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketStreamedTraceSegment"; - - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - oneof message_piece { - // Connection properties. - Connection connection = 2; - - // Socket event. - SocketEvent event = 3; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v3/wrapper.proto b/generated_api_shadow/envoy/data/tap/v3/wrapper.proto deleted file mode 100644 index 636547614c268..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/wrapper.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "envoy/data/tap/v3/http.proto"; -import "envoy/data/tap/v3/transport.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "WrapperProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap data wrappers] - -// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for -// sending traces over gRPC APIs or more easily persisting binary messages to files. 
-message TraceWrapper { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.TraceWrapper"; - - oneof trace { - option (validate.required) = true; - - // An HTTP buffered tap trace. - HttpBufferedTrace http_buffered_trace = 1; - - // An HTTP streamed tap trace segment. - HttpStreamedTraceSegment http_streamed_trace_segment = 2; - - // A socket buffered tap trace. - SocketBufferedTrace socket_buffered_trace = 3; - - // A socket streamed tap trace segment. - SocketStreamedTraceSegment socket_streamed_trace_segment = 4; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD deleted file mode 100644 index a1775bbe6f513..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto deleted file mode 100644 index bca7c913a65b5..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.file.v3; - -import "envoy/config/core/v3/substitution_format_string.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v3"; -option 
java_outer_classname = "FileProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: File access log] -// [#extension: envoy.access_loggers.file] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* -// AccessLog. -// [#next-free-field: 6] -message FileAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.FileAccessLog"; - - // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - oneof access_log_format { - // Access log :ref:`format string`. - // Envoy supports :ref:`custom access log formats ` as well as a - // :ref:`default format `. - // This field is deprecated. - // Please use :ref:`log_format `. - string format = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access log :ref:`format dictionary`. All values - // are rendered as strings. - // This field is deprecated. - // Please use :ref:`log_format `. - google.protobuf.Struct json_format = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access log :ref:`format dictionary`. Values are - // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may - // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the - // documentation for a specific command operator for details. - // This field is deprecated. - // Please use :ref:`log_format `. - google.protobuf.Struct typed_json_format = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. 
- config.core.v3.SubstitutionFormatString log_format = 5 - [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto deleted file mode 100644 index fa0a9f0f820d5..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.grpc.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.grpc.v3"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Configuration for the built-in *envoy.access_loggers.http_grpc* -// :ref:`AccessLog `. This configuration will -// populate :ref:`StreamAccessLogsMessage.http_logs -// `. 
-// [#extension: envoy.access_loggers.http_grpc] -message HttpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.HttpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers - // `. - repeated string additional_request_headers_to_log = 2; - - // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers - // `. - repeated string additional_response_headers_to_log = 3; - - // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers - // `. - repeated string additional_response_trailers_to_log = 4; -} - -// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will -// populate *StreamAccessLogsMessage.tcp_logs*. -// [#extension: envoy.access_loggers.tcp_grpc] -message TcpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.TcpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; -} - -// Common configuration for gRPC access logs. -// [#next-free-field: 7] -message CommonGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig"; - - // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier - // `. This allows the - // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The gRPC service for the access log service. - config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for access logs service transport protocol. 
This describes the access logs service - // gRPC endpoint and version of messages used on the wire. - config.core.v3.ApiVersion transport_api_version = 6 - [(validate.rules).enum = {defined_only: true}]; - - // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it - // to zero effectively disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 4; - - // Additional filter state objects to log in :ref:`filter_state_objects - // `. - // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. - repeated string filter_state_objects_to_log = 5; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD deleted file mode 100644 index 37737510d8ea6..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/access_loggers/grpc/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opentelemetry_proto//:common", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto b/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto deleted file mode 100644 index 1b7027133e153..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.open_telemetry.v3alpha; - -import "envoy/extensions/access_loggers/grpc/v3/als.proto"; - -import "opentelemetry/proto/common/v1/common.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3alpha"; -option java_outer_classname = "LogsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OpenTelemetry (gRPC) Access Log] - -// Configuration for the built-in *envoy.access_loggers.open_telemetry* -// :ref:`AccessLog `. This configuration will -// populate `opentelemetry.proto.collector.v1.logs.ExportLogsServiceRequest.resource_logs `_. -// OpenTelemetry `Resource `_ -// attributes are filled with Envoy node info. In addition, the request start time is set in the -// dedicated field. -// [#extension: envoy.access_loggers.open_telemetry] -// [#comment:TODO(itamarkam): allow configuration for resource attributes.] -message OpenTelemetryAccessLogConfig { - // [#comment:TODO(itamarkam): add 'filter_state_objects_to_log' to logs.] 
- grpc.v3.CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // OpenTelemetry `LogResource `_ - // fields, following `Envoy access logging formatting `_. - // - // See 'body' in the LogResource proto for more details. - // Example: ``body { string_value: "%PROTOCOL%" }``. - opentelemetry.proto.common.v1.AnyValue body = 2; - - // See 'attributes' in the LogResource proto for more details. - // Example: ``attributes { values { key: "user_agent" value { string_value: "%REQ(USER-AGENT)%" } } }``. - opentelemetry.proto.common.v1.KeyValueList attributes = 3; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto b/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto deleted file mode 100644 index bd704ccdb6768..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.stream.v3; - -import "envoy/config/core/v3/substitution_format_string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.stream.v3"; -option java_outer_classname = "StreamProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Standard Streams Access loggers] -// [#extension: envoy.access_loggers.stream] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard output. -message StdoutAccessLog { - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v3.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard error. -message StderrAccessLog { - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v3.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto deleted file mode 100644 index 44e96345dfee5..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm access log] -// [#extension: envoy.access_loggers.wasm] - -// Custom configuration for an :ref:`AccessLog ` -// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm* -// AccessLog. -message WasmAccessLog { - envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto b/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto deleted file mode 100644 index 1b42e9b3f93d4..0000000000000 --- a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.cache.simple_http_cache.v3alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SimpleHttpCache CacheFilter storage plugin] - -// [#extension: envoy.cache.simple_http_cache] -message SimpleHttpCacheConfig { -} diff --git a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto deleted file mode 100644 index aead1c4517391..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.aggregate.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.aggregate.v3"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Aggregate cluster configuration] - -// Configuration for the aggregate cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.aggregate] -message ClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.aggregate.v2alpha.ClusterConfig"; - - // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they - // appear in this list. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD deleted file mode 100644 index 05f25a2fe5d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto deleted file mode 100644 index c4fc8285ee597..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.dynamic_forward_proxy.v3; - -import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic forward proxy cluster configuration] - -// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.dynamic_forward_proxy] -message ClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig"; - - // The DNS cache configuration that the cluster will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy HTTP filter configuration - // `. 
- common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options - // in the :ref:`cluster's upstream_http_protocol_options - // ` - bool allow_insecure_cluster_options = 2; -} diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto deleted file mode 100644 index 73598eafbe9d2..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.redis.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.redis.v3"; -option java_outer_classname = "RedisClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis Cluster Configuration] -// This cluster adds support for `Redis Cluster `_, as part -// of :ref:`Envoy's support for Redis Cluster `. 
-// -// Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its primary fails over to a replica, and designates it as the new primary). -// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client -// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the -// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS -// command `_. This result is then stored locally, and -// updated at user-configured intervals. -// -// Additionally, if -// :ref:`enable_redirection` -// is true, then moved and ask redirection errors from upstream servers will trigger a topology -// refresh when they exceed a user-configured error threshold. -// -// Example: -// -// .. code-block:: yaml -// -// name: name -// connect_timeout: 0.25s -// dns_lookup_family: V4_ONLY -// hosts: -// - socket_address: -// address: foo.bar.com -// port_value: 22120 -// cluster_type: -// name: envoy.clusters.redis -// typed_config: -// "@type": type.googleapis.com/google.protobuf.Struct -// value: -// cluster_refresh_rate: 30s -// cluster_refresh_timeout: 0.5s -// redirect_refresh_interval: 10s -// redirect_refresh_threshold: 10 -// [#extension: envoy.clusters.redis] - -// [#next-free-field: 7] -message RedisClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.redis.RedisClusterConfig"; - - // Interval between successive topology refresh requests. If not set, this defaults to 5s. - google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; - - // Timeout for topology refresh request. If not set, this defaults to 3s. - google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; - - // The minimum interval that must pass after triggering a topology refresh request before a new - // request can possibly be triggered again. 
Any errors received during one of these - // time intervals are ignored. If not set, this defaults to 5s. - google.protobuf.Duration redirect_refresh_interval = 3; - - // The number of redirection errors that must be received before - // triggering a topology refresh request. If not set, this defaults to 5. - // If this is set to 0, topology refresh after redirect is disabled. - google.protobuf.UInt32Value redirect_refresh_threshold = 4; - - // The number of failures that must be received before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to failure. - uint32 failure_refresh_threshold = 5; - - // The number of hosts became degraded or unhealthy before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to degraded or - // unhealthy host. - uint32 host_degraded_refresh_threshold = 6; -} diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD deleted file mode 100644 index 6e07b4a9226bb..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto deleted file mode 100644 index 4a0d87ff6c3b8..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ /dev/null @@ -1,146 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.dynamic_forward_proxy.v3; - -import "envoy/config/cluster/v3/cluster.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/extensions/common/key_value/v3/config.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3"; -option java_outer_classname = "DnsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic forward proxy common configuration] - -// Configuration of circuit breakers for resolver. -message DnsCacheCircuitBreakers { - // The maximum number of pending requests that Envoy will allow to the - // resolver. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 1; -} - -// Configuration for the dynamic forward proxy DNS cache. 
See the :ref:`architecture overview -// ` for more information. -// [#next-free-field: 14] -message DnsCacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; - - // The name of the cache. Multiple named caches allow independent dynamic forward proxy - // configurations to operate within a single Envoy process using different configurations. All - // configurations with the same name *must* otherwise have the same settings when referenced - // from different configuration components. Configuration will fail to load if this is not - // the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The DNS lookup family to use during resolution. - // - // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The - // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and - // then configures a host to have a primary and fall back address. With this, we could very - // likely build a "happy eyeballs" connection pool which would race the primary / fall back - // address and return the one that wins. This same method could potentially also be used for - // QUIC to TCP fall back.] - config.cluster.v3.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum = {defined_only: true}]; - - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: - // - // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. - google.protobuf.Duration dns_refresh_rate = 3 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The TTL for hosts that are unused. Hosts that have not been used in the configured time - // interval will be purged. 
If not specified defaults to 5m. - // - // .. note: - // - // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This - // means that if the configured TTL is shorter than the refresh rate the host may not be removed - // immediately. - // - // .. note: - // - // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; - - // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum hosts in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - - // If the DNS failure refresh rate is specified, - // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the dns_refresh_rate. - config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6; - - // The config of circuit breakers for resolver. It provides a configurable threshold. - // Envoy will use dns cache circuit breakers with default settings even if this value is not set. - DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. 
- bool use_tcp_for_dns_lookups = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - config.core.v3.DnsResolutionConfig dns_resolution_config = 9; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - config.core.v3.TypedExtensionConfig typed_dns_resolver_config = 12; - - // Hostnames that should be preresolved into the cache upon creation. This might provide a - // performance improvement, in the form of cache hits, for hostnames that are going to be - // resolved during steady state and are known at config load time. - repeated config.core.v3.SocketAddress preresolve_hostnames = 10; - - // The timeout used for DNS queries. This timeout is independent of any timeout and retry policy - // used by the underlying DNS implementation (e.g., c-areas and Apple DNS) which are opaque. - // Setting this timeout will ensure that queries succeed or fail within the specified time frame - // and are then retried using the standard refresh rates. 
Defaults to 5s if not set. - google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; - - // [#not-implemented-hide:] - // Configuration to flush the DNS cache to long term storage. - key_value.v3.KeyValueStoreConfig key_value_config = 13; -} diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD b/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD b/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD deleted file mode 100644 index 1afd4545d9608..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/type/matcher/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto b/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto deleted file mode 100644 index eee82a381633b..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.matching.v3; - -import "envoy/config/common/matcher/v3/matcher.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "xds/type/matcher/v3/matcher.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.matching.v3"; -option java_outer_classname = "ExtensionMatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Extension Matcher] - -// Wrapper around an existing extension that provides an associated matcher. This allows -// decorating an existing extension with a matcher, which can be used to match against -// relevant protocol data. -// -// [#alpha:] -message ExtensionWithMatcher { - // The associated matcher. This is deprecated in favor of xds_matcher. - config.common.matcher.v3.Matcher matcher = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The associated matcher. - xds.type.matcher.v3.Matcher xds_matcher = 3; - - // The underlying extension config. 
- config.core.v3.TypedExtensionConfig extension_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD deleted file mode 100644 index 660d629ab7b00..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/ratelimit:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto deleted file mode 100644 index 6bb771d25af94..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ /dev/null @@ -1,103 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.ratelimit.v3; - -import "envoy/type/v3/ratelimit_unit.proto"; -import "envoy/type/v3/token_bucket.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.ratelimit.v3"; -option java_outer_classname = "RatelimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common rate limit components] - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// -// .. 
code-block:: cpp -// -// ["authenticated": "false"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The -// configuration supplies a default limit for the *remote_address* key. If there is a desire to -// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"] -// -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"] -// -// What it does: Limits all traffic for an authenticated client "foo" -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. -// -// Optionally the descriptor can contain a limit override under a "limit" key, that specifies -// the number of requests per unit to use instead of the number configured in the -// rate limiting service. -message RateLimitDescriptor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ratelimit.RateLimitDescriptor"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ratelimit.RateLimitDescriptor.Entry"; - - // Descriptor key. 
- string key = 1 [(validate.rules).string = {min_len: 1}]; - - // Descriptor value. - string value = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Override rate limit to apply to this descriptor instead of the limit - // configured in the rate limit service. See :ref:`rate limit override - // ` for more information. - message RateLimitOverride { - // The number of requests per unit of time. - uint32 requests_per_unit = 1; - - // The unit of time. - type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // Descriptor entries. - repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Optional rate limit override to supply to the ratelimit service. - RateLimitOverride limit = 2; -} - -message LocalRateLimitDescriptor { - // Descriptor entries. - repeated v3.RateLimitDescriptor.Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Token Bucket algorithm for local ratelimiting. - type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD deleted file mode 100644 index a99fa811f859a..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto deleted file mode 100644 index 4c67af7d30081..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.tap.v3; - -import "envoy/config/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.tap.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common tap extension configuration] - -// Common configuration for all tap extensions. -message CommonExtensionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.tap.v2alpha.CommonExtensionConfig"; - - oneof config_type { - option (validate.required) = true; - - // If specified, the tap filter will be configured via an admin handler. - AdminConfig admin_config = 1; - - // If specified, the tap filter will be configured via a static configuration that cannot be - // changed. - config.tap.v3.TapConfig static_config = 2; - } -} - -// Configuration for the admin handler. See :ref:`here ` for -// more information. -message AdminConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.tap.v2alpha.AdminConfig"; - - // Opaque configuration ID. 
When requests are made to the admin handler, the passed opaque ID is - // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto b/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto deleted file mode 100644 index cb2933dd5d385..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.brotli.compressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.brotli.compressor.v3"; -option java_outer_classname = "BrotliProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Brotli Compressor] -// [#extension: envoy.compression.brotli.compressor] - -// [#next-free-field: 7] -message Brotli { - enum EncoderMode { - DEFAULT = 0; - GENERIC = 1; - TEXT = 2; - FONT = 3; - } - - // Value from 0 to 11 that controls the main compression speed-density lever. - // The higher quality, the slower compression. 
The default value is 3. - google.protobuf.UInt32Value quality = 1 [(validate.rules).uint32 = {lte: 11}]; - - // A value used to tune encoder for specific input. For more information about modes, - // please refer to brotli manual: https://brotli.org/encode.html#aa6f - // This field will be set to "DEFAULT" if not specified. - EncoderMode encoder_mode = 2 [(validate.rules).enum = {defined_only: true}]; - - // Value from 10 to 24 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 18. - // For more details about this parameter, please refer to brotli manual: - // https://brotli.org/encode.html#a9a8 - google.protobuf.UInt32Value window_bits = 3 [(validate.rules).uint32 = {lte: 24 gte: 10}]; - - // Value from 16 to 24 that represents the base two logarithmic of the compressor's input block - // size. Larger input block results in better compression at the expense of memory usage. The - // default is 24. For more details about this parameter, please refer to brotli manual: - // https://brotli.org/encode.html#a9a8 - google.protobuf.UInt32Value input_block_bits = 4 [(validate.rules).uint32 = {lte: 24 gte: 16}]; - - // Value for compressor's next output buffer. If not set, defaults to 4096. - google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; - - // If true, disables "literal context modeling" format feature. - // This flag is a "decoding-speed vs compression ratio" trade-off. - bool disable_literal_context_modeling = 6; -} diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto b/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto deleted file mode 100644 index 24511861cf930..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.brotli.decompressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.brotli.decompressor.v3"; -option java_outer_classname = "BrotliProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Brotli Decompressor] -// [#extension: envoy.compression.brotli.decompressor] - -message Brotli { - // If true, disables "canny" ring buffer allocation strategy. - // Ring buffer is allocated according to window size, despite the real size of the content. - bool disable_ring_buffer_reallocation = 1; - - // Value for decompressor's next output buffer. If not set, defaults to 4096. - google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto deleted file mode 100644 index 2f37315be355c..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.gzip.compressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Gzip Compressor] -// [#extension: envoy.compression.gzip.compressor] - -// [#next-free-field: 6] -message Gzip { - // All the values of this enumeration translate directly to zlib's compression strategies. - // For more information about each strategy, please refer to zlib manual. - enum CompressionStrategy { - DEFAULT_STRATEGY = 0; - FILTERED = 1; - HUFFMAN_ONLY = 2; - RLE = 3; - FIXED = 4; - } - - enum CompressionLevel { - option allow_alias = true; - - DEFAULT_COMPRESSION = 0; - BEST_SPEED = 1; - COMPRESSION_LEVEL_1 = 1; - COMPRESSION_LEVEL_2 = 2; - COMPRESSION_LEVEL_3 = 3; - COMPRESSION_LEVEL_4 = 4; - COMPRESSION_LEVEL_5 = 5; - COMPRESSION_LEVEL_6 = 6; - COMPRESSION_LEVEL_7 = 7; - COMPRESSION_LEVEL_8 = 8; - COMPRESSION_LEVEL_9 = 9; - BEST_COMPRESSION = 9; - } - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. 
- google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST_COMPRESSION" provides higher compression - // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides - // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". - // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According - // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". - // This field will be set to "DEFAULT_COMPRESSION" if not specified. - CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, - // which is also the default value for the parameter, though there are situations when - // changing this parameter might produce better results. For example, run-length encoding (RLE) - // is typically used when the content is known for having sequences which same data occurs many - // consecutive times. For more information about each strategy, please refer to zlib manual. - CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Value for Zlib's next output buffer. If not set, defaults to 4096. 
- // See https://www.zlib.net/manual.html for more details. Also see - // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. - google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto deleted file mode 100644 index 8fb694e883618..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.gzip.decompressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Gzip Decompressor] -// [#extension: envoy.compression.gzip.decompressor] - -message Gzip { - // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. - // The decompression window size needs to be equal or larger than the compression window size. 
- // The default window size is 15. - // This is so that the decompressor can decompress a response compressed by a compressor with any compression window size. - // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. - google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Value for zlib's decompressor output buffer. If not set, defaults to 4096. - // See https://www.zlib.net/manual.html for more details. - google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto b/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto deleted file mode 100644 index 9dce610afeefb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto +++ /dev/null @@ -1,59 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.common.dependency.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.common.dependency.v3"; -option java_outer_classname = "DependencyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Filter dependency 
specification] - -// Dependency specification and string identifier. -message Dependency { - enum DependencyType { - HEADER = 0; - FILTER_STATE_KEY = 1; - DYNAMIC_METADATA = 2; - } - - // The kind of dependency. - DependencyType type = 1; - - // The string identifier for the dependency. - string name = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Dependency specification for a filter. For a filter chain to be valid, any -// dependency that is required must be provided by an earlier filter. -message FilterDependencies { - // A list of dependencies required on the decode path. - repeated Dependency decode_required = 1; - - // A list of dependencies provided on the encode path. - repeated Dependency decode_provided = 2; - - // A list of dependencies required on the decode path. - repeated Dependency encode_required = 3; - - // A list of dependencies provided on the encode path. - repeated Dependency encode_provided = 4; -} - -// Matching requirements for a filter. For a match tree to be used with a filter, the match -// requirements must be satisfied. -// -// This protobuf is provided by the filter implementation as a way to communicate the matching -// requirements to the filter factories, allowing for config rejection if the requirements are -// not satisfied. -message MatchingRequirements { - message DataInputAllowList { - // An explicit list of data inputs that are allowed to be used with this filter. - repeated string type_url = 1; - } - - DataInputAllowList data_input_allow_list = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD deleted file mode 100644 index b5020d19d58b5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto deleted file mode 100644 index bcb5bdf9bbf55..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.common.fault.v3; - -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.common.fault.v3"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common fault injection types] - -// Delay specification is used to inject latency into the -// HTTP/Mongo operation. -// [#next-free-field: 6] -message FaultDelay { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultDelay"; - - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } - - // Fault delays are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. 
- message HeaderDelay { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultDelay.HeaderDelay"; - } - - reserved 2; - - oneof fault_delay_secifier { - option (validate.required) = true; - - // Add a fixed delay before forwarding the operation upstream. See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified - // delay will be injected before a new request/operation. - // This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; - - // Fault delays are controlled via an HTTP header (if applicable). - HeaderDelay header_delay = 5; - } - - // The percentage of operations/connections/requests on which the delay will be injected. - type.v3.FractionalPercent percentage = 4; - - FaultDelayType hidden_envoy_deprecated_type = 1 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Describes a rate limit to be applied. -message FaultRateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultRateLimit"; - - // Describes a fixed/constant rate limit. - message FixedLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultRateLimit.FixedLimit"; - - // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; - } - - // Rate limits are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit"; - } - - oneof limit_type { - option (validate.required) = true; - - // A fixed rate limit. 
- FixedLimit fixed_limit = 1; - - // Rate limits are controlled via an HTTP header (if applicable). - HeaderLimit header_limit = 3; - } - - // The percentage of operations/connections/requests on which the rate limit will be injected. - type.v3.FractionalPercent percentage = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto b/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto deleted file mode 100644 index 2835c9f6d75a6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.common.matcher.action.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.common.matcher.action.v3"; -option java_outer_classname = "SkipActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common Match Actions] - -// Configuration for the SkipFilter match action. When matching results in this action, the -// associated filter will be ignored for all filter callbacks (e.g. `encodeHeaders`, `encodeData`, -// etc. 
for HTTP filters) after the matcher arrives at the match, including the callback that -// caused the match result. For example, when used with a HTTP filter and the match result was -// resolved after receiving the HTTP response headers, the HTTP filter will *not* receive the -// response header callback. -// -// As a result, if this match action is resolved before the first filter callback (e.g. HTTP request -// headers), the filter will be completely skipped. -message SkipFilter { -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto deleted file mode 100644 index c524e022e8594..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ /dev/null @@ -1,107 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.adaptive_concurrency.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3"; -option java_outer_classname = "AdaptiveConcurrencyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Adaptive Concurrency] -// Adaptive Concurrency Control :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.adaptive_concurrency] - -// Configuration parameters for the gradient controller. 
-message GradientControllerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig"; - - // Parameters controlling the periodic recalculation of the concurrency limit from sampled request - // latencies. - message ConcurrencyLimitCalculationParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." - "ConcurrencyLimitCalculationParams"; - - // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000. - google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The period of time samples are taken to recalculate the concurrency limit. - google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; - } - - // Parameters controlling the periodic minRTT recalculation. - // [#next-free-field: 6] - message MinimumRTTCalculationParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." - "MinimumRTTCalculationParams"; - - // The time interval between recalculating the minimum request round-trip time. Has to be - // positive. - google.protobuf.Duration interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // The number of requests to aggregate/sample during the minRTT recalculation window before - // updating. Defaults to 50. - google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}]; - - // Randomized time delta that will be introduced to the start of the minRTT calculation window. - // This is represented as a percentage of the interval duration. Defaults to 15%. - // - // Example: If the interval is 10s and the jitter is 15%, the next window will begin - // somewhere in the range (10s - 11.5s). 
- type.v3.Percent jitter = 3; - - // The concurrency limit set while measuring the minRTT. Defaults to 3. - google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}]; - - // Amount added to the measured minRTT to add stability to the concurrency limit during natural - // variability in latency. This is expressed as a percentage of the measured value and can be - // adjusted to allow more or less tolerance to the sampled latency values. - // - // Defaults to 25%. - type.v3.Percent buffer = 5; - } - - // The percentile to use when summarizing aggregated samples. Defaults to p50. - type.v3.Percent sample_aggregate_percentile = 1; - - ConcurrencyLimitCalculationParams concurrency_limit_params = 2 - [(validate.rules).message = {required: true}]; - - MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}]; -} - -message AdaptiveConcurrency { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency"; - - oneof concurrency_controller_config { - option (validate.required) = true; - - // Gradient concurrency control will be used. - GradientControllerConfig gradient_controller_config = 1 - [(validate.rules).message = {required: true}]; - } - - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD deleted file mode 100644 index ad2fc9a9a84fd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto b/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto deleted file mode 100644 index e628a6ca73fbb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.alternate_protocols_cache.v3; - -import "envoy/config/core/v3/protocol.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.alternate_protocols_cache.v3"; -option java_outer_classname = "AlternateProtocolsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Alternate Protocols Cache] - -// Configuration for the alternate protocols cache HTTP filter. 
-// [#extension: envoy.filters.http.alternate_protocols_cache] -// TODO(RyanTheOptimist): Move content from source/docs/http3_upstream.md to -// docs/root/intro/arch_overview/upstream/connection_pooling.rst when unhiding the proto. -message FilterConfig { - // [#not-implemented-hide:] - // If set, causes the use of the alternate protocols cache, which is responsible for - // parsing and caching HTTP Alt-Svc headers. This enables the use of HTTP/3 for upstream - // servers that advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled via auto_http. - config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto deleted file mode 100644 index b4b9cc398f2e4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.aws_lambda.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3"; -option java_outer_classname = "AwsLambdaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: AWS Lambda] -// AWS Lambda :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_lambda] - -// AWS Lambda filter config -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.aws_lambda.v2alpha.Config"; - - enum InvocationMode { - // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In - // this mode the output of the Lambda function becomes the response of the HTTP request. - SYNCHRONOUS = 0; - - // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be - // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the - // call which is translated to an HTTP 200 OK by the filter. 
- ASYNCHRONOUS = 1; - } - - // The ARN of the AWS Lambda to invoke when the filter is engaged - // Must be in the following format: - // arn::lambda:::function: - string arn = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether to transform the request (headers and body) to a JSON payload or pass it as is. - bool payload_passthrough = 2; - - // Determines the way to invoke the Lambda function. - InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; -} - -// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different -// version of the same Lambda depending on the route. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.aws_lambda.v2alpha.PerRouteConfig"; - - Config invoke_config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto deleted file mode 100644 index ae46400130d52..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.aws_request_signing.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3"; -option java_outer_classname = "AwsRequestSigningProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: AwsRequestSigning] -// AwsRequestSigning :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_request_signing] - -// Top level configuration for the AWS request signing filter. -message AwsRequestSigning { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning"; - - // The `service namespace - // `_ - // of the HTTP endpoint. - // - // Example: s3 - string service_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `region `_ hosting the HTTP - // endpoint. - // - // Example: us-west-2 - string region = 2 [(validate.rules).string = {min_len: 1}]; - - // Indicates that before signing headers, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. 
- // - // Note: this rewrite affects both signing and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for signing whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite = 3; - - // Instead of buffering the request to calculate the payload hash, use the literal string ``UNSIGNED-PAYLOAD`` - // to calculate the payload hash. Not all services support this option. See the `S3 - // `_ policy for details. - bool use_unsigned_payload = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto deleted file mode 100644 index 4cd5f8268b704..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.bandwidth_limit.v3alpha; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3alpha"; -option java_outer_classname = "BandwidthLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Bandwidth limit] -// Bandwidth limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.bandwidth_limit] - -// [#next-free-field: 6] -message BandwidthLimit { - // Defines the mode for the bandwidth limit filter. - // Values represent bitmask. - enum EnableMode { - // Filter is disabled. - DISABLED = 0; - - // Filter enabled only for incoming traffic. - REQUEST = 1; - - // Filter enabled only for outgoing traffic. - RESPONSE = 2; - - // Filter enabled for both incoming and outgoing traffic. - REQUEST_AND_RESPONSE = 3; - } - - // The human readable prefix to use when emitting stats. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The enable mode for the bandwidth limit filter. 
- // Default is Disabled. - EnableMode enable_mode = 2 [(validate.rules).enum = {defined_only: true}]; - - // The limit supplied in KiB/s. - // - // .. note:: - // It's fine for the limit to be unset for the global configuration since the bandwidth limit - // can be applied at a the virtual host or route level. Thus, the limit must be set for the - // per route configuration otherwise the config will be rejected. - // - // .. note:: - // When using per route configuration, the limit becomes unique to that route. - // - google.protobuf.UInt64Value limit_kbps = 3 [(validate.rules).uint64 = {gte: 1}]; - - // Optional Fill interval in milliseconds for the token refills. Defaults to 50ms. - // It must be at least 20ms to avoid too aggressive refills. - google.protobuf.Duration fill_interval = 4 [(validate.rules).duration = { - lte {seconds: 1} - gte {nanos: 20000000} - }]; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto b/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto deleted file mode 100644 index 6f73244032c4e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.buffer.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.buffer.v3"; -option java_outer_classname = "BufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Buffer] -// Buffer :ref:`configuration overview `. -// [#extension: envoy.filters.http.buffer] - -message Buffer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.buffer.v2.Buffer"; - - reserved 2; - - // The maximum request size that the filter will buffer before the connection - // manager will stop buffering and return a 413 response. - google.protobuf.UInt32Value max_request_bytes = 1 - [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}]; -} - -message BufferPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.buffer.v2.BufferPerRoute"; - - oneof override { - option (validate.required) = true; - - // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Override the global configuration of the filter with this new config. 
- Buffer buffer = 2 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD deleted file mode 100644 index c0ffdf28daaf9..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto b/generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto deleted file mode 100644 index f8a3bd83af567..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.composite.v3; - -import "envoy/config/core/v3/extension.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.composite.v3"; -option java_outer_classname = "CompositeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Composite] -// Composite Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.composite] - -// :ref:`Composite filter ` config. The composite filter config -// allows delegating filter handling to another filter as determined by matching on the request -// headers. This makes it possible to use different filters or filter configurations based on the -// incoming request. -// -// This is intended to be used with -// :ref:`ExtensionWithMatcher ` -// where a match tree is specified that indicates (via -// :ref:`ExecuteFilterAction `) -// which filter configuration to create and delegate to. -// -// [#alpha:] -message Composite { -} - -// Composite match action (see :ref:`matching docs ` for more info on match actions). -// This specifies the filter configuration of the filter that the composite filter should delegate filter interactions to. 
-message ExecuteFilterAction { - config.core.v3.TypedExtensionConfig typed_config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD deleted file mode 100644 index a1775bbe6f513..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto deleted file mode 100644 index 72b435c93ddaa..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ /dev/null @@ -1,125 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.compressor.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; -option java_outer_classname = "CompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Compressor] -// Compressor :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.compressor] - -// [#next-free-field: 9] -message Compressor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.compressor.v2.Compressor"; - - message CommonDirectionConfig { - // Runtime flag that controls whether compression is enabled or not for the direction this - // common config is put in. If set to false, the filter will operate as a pass-through filter - // in the chosen direction. If the field is omitted, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 1; - - // Minimum value of Content-Length header of request or response messages (depending on the direction - // this common config is put in), in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value min_content_length = 2; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 3; - } - - // Configuration for filter behavior on the request direction. - message RequestDirectionConfig { - CommonDirectionConfig common_config = 1; - } - - // Configuration for filter behavior on the response direction. - message ResponseDirectionConfig { - CommonDirectionConfig common_config = 1; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 2; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. 
attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 3; - } - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Runtime flag that controls whether the filter is enabled or not. If set to false, the - // filter will operate as a pass-through filter. If not specified, defaults to enabled. 
- config.core.v3.RuntimeFeatureFlag runtime_enabled = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // A compressor library to use for compression. Currently only - // :ref:`envoy.compression.gzip.compressor` - // is included in Envoy. - // [#extension-category: envoy.compression.compressor] - config.core.v3.TypedExtensionConfig compressor_library = 6 - [(validate.rules).message = {required: true}]; - - // Configuration for request compression. Compression is disabled by default if left empty. - RequestDirectionConfig request_direction_config = 7; - - // Configuration for response compression. Compression is enabled by default if left empty. - // - // .. attention:: - // - // If the field is not empty then the duplicate deprecated fields of the `Compressor` message, - // such as `content_length`, `content_type`, `disable_on_etag_header`, - // `remove_accept_encoding_header` and `runtime_enabled`, are ignored. - // - // Also all the statistics related to response compression will be rooted in - // `.compressor...response.*` - // instead of - // `.compressor...*`. - ResponseDirectionConfig response_direction_config = 8; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto b/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto deleted file mode 100644 index 0269e1bdfd8c7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cors.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cors.v3"; -option java_outer_classname = "CorsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Cors] -// CORS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.cors] - -// Cors filter config. -message Cors { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.cors.v2.Cors"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD deleted file mode 100644 index 3f3a5395d2aa7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto b/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto deleted file mode 100644 index 39b0455bd7981..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.csrf.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v3"; -option java_outer_classname = "CsrfProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: CSRF] -// Cross-Site Request Forgery :ref:`configuration overview `. -// [#extension: envoy.filters.http.csrf] - -// CSRF filter config. -message CsrfPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.csrf.v2.CsrfPolicy"; - - // Specifies the % of requests for which the CSRF filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - config.core.v3.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message = {required: true}]; - - // Specifies that CSRF policies will be evaluated and tracked, but not enforced. - // - // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
- // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* and *Destination* to determine if it's valid, but will not - // enforce any policies. - config.core.v3.RuntimeFractionalPercent shadow_enabled = 2; - - // Specifies additional source origins that will be allowed in addition to - // the destination origin. - // - // More information on how this can be configured via runtime can be found - // :ref:`here `. - repeated type.matcher.v3.StringMatcher additional_origins = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto deleted file mode 100644 index c4cca44020f6d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.decompressor.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; -option java_outer_classname = "DecompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Decompressor] -// [#extension: envoy.filters.http.decompressor] - -message Decompressor { - // Common configuration for filter behavior on both the request and response direction. - message CommonDirectionConfig { - // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the - // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 1; - } - - // Configuration for filter behavior on the request direction. - message RequestDirectionConfig { - CommonDirectionConfig common_config = 1; - - // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding - // request header by appending the decompressor_library's encoding. Defaults to true. 
- google.protobuf.BoolValue advertise_accept_encoding = 2; - } - - // Configuration for filter behavior on the response direction. - message ResponseDirectionConfig { - CommonDirectionConfig common_config = 1; - } - - // A decompressor library to use for both request and response decompression. Currently only - // :ref:`envoy.compression.gzip.compressor` - // is included in Envoy. - // [#extension-category: envoy.compression.decompressor] - config.core.v3.TypedExtensionConfig decompressor_library = 1 - [(validate.rules).message = {required: true}]; - - // Configuration for request decompression. Decompression is enabled by default if left empty. - RequestDirectionConfig request_direction_config = 2; - - // Configuration for response decompression. Decompression is enabled by default if left empty. - ResponseDirectionConfig response_direction_config = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD deleted file mode 100644 index 05f25a2fe5d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto deleted file mode 100644 index a5d7223b98d28..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamic_forward_proxy.v3; - -import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3"; -option java_outer_classname = "DynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic forward proxy] - -// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.filters.http.dynamic_forward_proxy] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig"; - - // The DNS cache configuration that the filter will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy cluster configuration - // `. 
- common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} - -// Per route Configuration for the dynamic forward proxy HTTP filter. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig"; - - oneof host_rewrite_specifier { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for DNS lookups whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite_literal = 1; - - // Indicates that before DNS lookup, the host header will be swapped with - // the value of this header. If not set or empty, the original host header - // value will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite header ` - // given that the value set here would be used for DNS lookups whereas the value set in the HCM - // would be used for host header forwarding which is not the desired outcome. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string host_rewrite_header = 2; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto deleted file mode 100644 index 13a4f1c6ceee0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamo.v3"; -option java_outer_classname = "DynamoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamo] -// Dynamo :ref:`configuration overview `. -// [#extension: envoy.filters.http.dynamo] - -// Dynamo filter config. -message Dynamo { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.dynamo.v2.Dynamo"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD deleted file mode 100644 index bc2a58d2a7f1c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto deleted file mode 100644 index 62feb51b191d5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ /dev/null @@ -1,317 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_authz.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/http_status.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Authorization] -// External Authorization :ref:`configuration overview `. -// [#extension: envoy.filters.http.ext_authz] - -// [#next-free-field: 16] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; - - reserved 4; - - reserved "use_alpha"; - - // External authorization service configuration. - oneof services { - // gRPC service configuration (default timeout: 200ms). 
- config.core.v3.GrpcService grpc_service = 1; - - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of messages used on the wire. - config.core.v3.ApiVersion transport_api_version = 12 - [(validate.rules).enum = {defined_only: true}]; - - // Changes filter's behaviour on errors: - // - // 1. When set to true, the filter will *accept* client request even if the communication with - // the authorization service has failed, or if the authorization service has returned a HTTP 5xx - // error. - // - // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* - // response if the communication with the authorization service has failed, or if the - // authorization service has returned a HTTP 5xx error. - // - // Note that errors can be *always* tracked in the :ref:`stats - // `. - bool failure_mode_allow = 2; - - // Enables filter to buffer the client request body and send it within the authorization request. - // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization - // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; - - // Clears route cache in order to allow the external authorization service to correctly affect - // routing decisions. Filter clears all cached routes when: - // - // 1. The field is set to *true*. - // - // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. - // - // 3. At least one *authorization response header* is added to the client request, or is used for - // altering another client request header. - // - bool clear_route_cache = 6; - - // Sets the HTTP status that is returned to the client when there is a network error between the - // filter and the authorization server. The default status is HTTP 403 Forbidden. 
- type.v3.HttpStatus status_on_error = 7; - - // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service as an opaque *protobuf::Struct*. - // - // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata - // ` is set, - // then the following will pass the jwt payload to the authorization server. - // - // .. code-block:: yaml - // - // metadata_context_namespaces: - // - envoy.filters.http.jwt_authn - // - repeated string metadata_context_namespaces = 8; - - // Specifies if the filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // If this field is not specified, the filter will be enabled for all requests. - config.core.v3.RuntimeFractionalPercent filter_enabled = 9; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14; - - // Specifies whether to deny the requests, when the filter is disabled. - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to determine whether to deny request for - // filter protected path at filter disabling. If filter is disabled in - // typed_per_filter_config for the path, requests will not be denied. - // - // If this field is not specified, all requests will be allowed when disabled. - config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 10; - - // Optional additional prefix to use when emitting statistics. This allows to distinguish - // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. 
For example: - // - // .. code-block:: yaml - // - // http_filters: - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. - // - string stat_prefix = 13; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 15; -} - -// Configuration for buffering the request data. -message BufferSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.BufferSettings"; - - // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return - // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number - // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow - // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; - - // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. - // The authorization request will be dispatched and no 413 HTTP error will be returned by the - // filter. - bool allow_partial_message = 2; - - // If true, the body sent to the external authorization service is set with raw bytes, it sets - // the :ref:`raw_body` - // field of HTTP request attribute context. Otherwise, :ref:` - // body` will be filled - // with UTF-8 string request body. - bool pack_as_bytes = 3; -} - -// HttpService is used for raw HTTP communication between the filter and the authorization service. 
-// When configured, the filter will parse the client request and use these attributes to call the -// authorization server. Depending on the response, the filter may reject or accept the client -// request. Note that in any of these events, metadata can be added, removed or overridden by the -// filter: -// -// *On authorization request*, a list of allowed request headers may be supplied. See -// :ref:`allowed_headers -// ` -// for details. Additional headers metadata may be added to the authorization request. See -// :ref:`headers_to_add -// ` for -// details. -// -// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and -// additional headers metadata may be added to the original client request. See -// :ref:`allowed_upstream_headers -// ` -// for details. Additionally, the filter may add additional headers to the client's response. See -// :ref:`allowed_client_headers_on_success -// ` -// for details. -// -// On other authorization response statuses, the filter will not allow traffic. Additional headers -// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers -// ` -// for details. -// [#next-free-field: 9] -message HttpService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.HttpService"; - - reserved 3, 4, 5, 6; - - // Sets the HTTP server URI which the authorization requests must be sent to. - config.core.v3.HttpUri server_uri = 1; - - // Sets a prefix to the value of authorization request header *Path*. - string path_prefix = 2; - - // Settings used for controlling authorization request metadata. - AuthorizationRequest authorization_request = 7; - - // Settings used for controlling authorization response metadata. 
- AuthorizationResponse authorization_response = 8; -} - -message AuthorizationRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.AuthorizationRequest"; - - // Authorization request includes the client request headers that have a correspondent match - // in the :ref:`list `. - // - // .. note:: - // - // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, - // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. - // - // .. note:: - // - // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization - // service has no message body. However, the authorization request *may* include the buffered - // client request body (controlled by :ref:`with_request_body - // ` - // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. - // - type.matcher.v3.ListStringMatcher allowed_headers = 1; - - // Sets a list of headers that will be included to the request to authorization service. Note that - // client request of the same key will be overridden. - repeated config.core.v3.HeaderValue headers_to_add = 2; -} - -message AuthorizationResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.AuthorizationResponse"; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the original client request. - // Note that coexistent headers will be overridden. - type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that coexistent headers will be appended. - type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; - - // When this :ref:`list `. 
is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that when this list is *not* set, all the authorization response headers, except *Authority - // (Host)* will be in the response to the client. When a header is included in this list, *Path*, - // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - type.matcher.v3.ListStringMatcher allowed_client_headers = 2; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response when - // the authorization response itself is successful, i.e. not failed or denied. When this list is - // *not* set, no additional headers will be added to the client's response on success. - type.matcher.v3.ListStringMatcher allowed_client_headers_on_success = 4; -} - -// Extra settings on a per virtualhost/route/weighted-cluster level. -message ExtAuthzPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.ExtAuthzPerRoute"; - - oneof override { - option (validate.required) = true; - - // Disable the ext auth filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; - } -} - -// Extra settings for the check request. -message CheckSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.CheckSettings"; - - // Context extensions to set on the CheckRequest's - // :ref:`AttributeContext.context_extensions` - // - // You can use this to provide extra context for the external authorization server on specific - // virtual hosts/routes. 
For example, adding a context extension on the virtual host level can - // give the ext-authz server information on what virtual host is used without needing to parse the - // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged - // in order, and the result will be used. - // - // Merge semantics for this field are such that keys from more specific configs override. - // - // .. note:: - // - // These settings are only applied to a filter configured with a - // :ref:`grpc_service`. - map context_extensions = 1 [(udpa.annotations.sensitive) = true]; - - // When set to true, disable the configured :ref:`with_request_body - // ` for a route. - bool disable_request_body_buffering = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto deleted file mode 100644 index 37560feba3c27..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ /dev/null @@ -1,186 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_proc.v3alpha; - -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; -option java_outer_classname = "ExtProcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Processing Filter] -// External Processing Filter -// [#extension: envoy.filters.http.ext_proc] - -// The External Processing filter allows an external service to act on HTTP traffic in a flexible way. - -// **Current Implementation Status:** -// All options and processing modes are implemented except for the following: -// -// * Request and response attributes are not sent and not processed. -// * Dynamic metadata in responses from the external processor is ignored. 
-// * "async mode" is not implemented -// * Per-route configuration is not implemented - -// The filter communicates with an external gRPC service called an "external processor" -// that can do a variety of things with the request and response: -// -// * Access and modify the HTTP headers on the request, response, or both -// * Access and modify the HTTP request and response bodies -// * Access and modify the dynamic stream metadata -// * Immediately send an HTTP response downstream and terminate other processing -// -// The filter communicates with the server using a gRPC bidirectional stream. After the initial -// request, the external server is in control over what additional data is sent to it -// and how it should be processed. -// -// By implementing the protocol specified by the stream, the external server can choose: -// -// * Whether it receives the response message at all -// * Whether it receives the message body at all, in separate chunks, or as a single buffer -// * Whether subsequent HTTP requests are transmitted synchronously or whether they are -// sent asynchronously. -// * To modify request or response trailers if they already exist -// * To add request or response trailers where they are not present -// -// The filter supports up to six different processing steps. Each is represented by -// a gRPC stream message that is sent to the external processor. For each message, the -// processor must send a matching response. -// -// * Request headers: Contains the headers from the original HTTP request. -// * Request body: Sent in a single message if the BUFFERED or BUFFERED_PARTIAL -// mode is chosen, in multiple messages if the STREAMED mode is chosen, and not -// at all otherwise. -// * Request trailers: Delivered if they are present and if the trailer mode is set -// to SEND. -// * Response headers: Contains the headers from the HTTP response. 
Keep in mind -// that if the upstream system sends them before processing the request body that -// this message may arrive before the complete body. -// * Response body: Sent according to the processing mode like the request body. -// * Response trailers: Delivered according to the processing mode like the -// request trailers. -// -// By default, the processor sends only the request and response headers messages. -// This may be changed to include any of the six steps by changing the processing_mode -// setting of the filter configuration, or by setting the mode_override of any response -// from the external processor. This way, a processor may, for example, use information -// in the request header to determine whether the message body must be examined, or whether -// the proxy should simply stream it straight through. -// -// All of this together allows a server to process the filter traffic in fairly -// sophisticated ways. For example: -// -// * A server may choose to examine all or part of the HTTP message bodies depending -// on the content of the headers. -// * A server may choose to immediately reject some messages based on their HTTP -// headers (or other dynamic metadata) and more carefully examine others -// * A server may asynchronously monitor traffic coming through the filter by inspecting -// headers, bodies, or both, and then decide to switch to a synchronous processing -// mode, either permanently or temporarily. -// -// The protocol itself is based on a bidirectional gRPC stream. Envoy will send the -// server -// :ref:`ProcessingRequest ` -// messages, and the server must reply with -// :ref:`ProcessingResponse `. - -// [#next-free-field: 9] -message ExternalProcessor { - // Configuration for the gRPC service that the filter will communicate with. - // The filter supports both the "Envoy" and "Google" gRPC clients. 
- config.core.v3.GrpcService grpc_service = 1; - - // By default, if the gRPC stream cannot be established, or if it is closed - // prematurely with an error, the filter will fail. Specifically, if the - // response headers have not yet been delivered, then it will return a 500 - // error downstream. If they have been delivered, then instead the HTTP stream to the - // downstream client will be reset. - // With this parameter set to true, however, then if the gRPC stream is prematurely closed - // or could not be opened, processing continues without error. - bool failure_mode_allow = 2; - - // Specifies default options for how HTTP headers, trailers, and bodies are - // sent. See ProcessingMode for details. - ProcessingMode processing_mode = 3; - - // [#not-implemented-hide:] - // If true, send each part of the HTTP request or response specified by ProcessingMode - // asynchronously -- in other words, send the message on the gRPC stream and then continue - // filter processing. If false, which is the default, suspend filter execution after - // each message is sent to the remote service and wait up to "message_timeout" - // for a reply. - bool async_mode = 4; - - // [#not-implemented-hide:] - // Envoy provides a number of :ref:`attributes ` - // for expressive policies. Each attribute name provided in this field will be - // matched against that list and populated in the request_headers message. - // See the :ref:`attribute documentation ` - // for the list of supported attributes and their types. - repeated string request_attributes = 5; - - // [#not-implemented-hide:] - // Envoy provides a number of :ref:`attributes ` - // for expressive policies. Each attribute name provided in this field will be - // matched against that list and populated in the response_headers message. - // See the :ref:`attribute documentation ` - // for the list of supported attributes and their types. 
- repeated string response_attributes = 6; - - // Specifies the timeout for each individual message sent on the stream and - // when the filter is running in synchronous mode. Whenever - // the proxy sends a message on the stream that requires a response, it will - // reset this timer, and will stop processing and return an error (subject - // to the processing mode) if the timer expires before a matching response. - // is received. There is no timeout when the filter is running in asynchronous - // mode. Default is 200 milliseconds. - google.protobuf.Duration message_timeout = 7; - - // [#not-implemented-hide:] - // Optional additional prefix to use when emitting statistics. This allows to distinguish - // emitted statistics between configured *ext_proc* filters in an HTTP filter chain. - string stat_prefix = 8; -} - -// Extra settings that may be added to per-route configuration for a -// virtual host or cluster. -message ExtProcPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Override aspects of the configuration for this route. A set of - // overrides in a more specific configuration will override a "disabled" - // flag set in a less-specific one. - ExtProcOverrides overrides = 2; - } -} - -// Overrides that may be set on a per-route basis -message ExtProcOverrides { - // Set a different processing mode for this route than the default. - ProcessingMode processing_mode = 1; - - // [#not-implemented-hide:] - // Set a different asynchronous processing option than the default. - bool async_mode = 2; - - // [#not-implemented-hide:] - // Set different optional properties than the default. - repeated string request_properties = 3; - - // [#not-implemented-hide:] - // Set different optional properties than the default. 
- repeated string response_properties = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD deleted file mode 100644 index 53db91cad82c3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/extensions/filters/common/fault/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto deleted file mode 100644 index 0c7fbb4480cfe..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.fault.v3; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/extensions/filters/common/fault/v3/fault.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v3"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fault Injection] -// Fault Injection :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.fault] - -// [#next-free-field: 6] -message FaultAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.fault.v2.FaultAbort"; - - // Fault aborts are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.fault.v2.FaultAbort.HeaderAbort"; - } - - reserved 1; - - oneof error_type { - option (validate.required) = true; - - // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // gRPC status code to use to abort the gRPC request. - uint32 grpc_status = 5; - - // Fault aborts are controlled via an HTTP header (if applicable). - HeaderAbort header_abort = 4; - } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. - type.v3.FractionalPercent percentage = 3; -} - -// [#next-free-field: 16] -message HTTPFault { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.fault.v2.HTTPFault"; - - // If specified, the filter will inject delays based on the values in the - // object. - common.fault.v3.FaultDelay delay = 1; - - // If specified, the filter will abort requests based on the values in - // the object. At least *abort* or *delay* must be specified. - FaultAbort abort = 2; - - // Specifies the name of the (destination) upstream cluster that the - // filter should match on. Fault injection will be restricted to requests - // bound to the specific upstream cluster. - string upstream_cluster = 3; - - // Specifies a set of headers that the filter should match on. The fault - // injection filter can be applied selectively to requests that match a set of - // headers specified in the fault filter config. 
The chances of actual fault - // injection further depend on the value of the :ref:`percentage - // ` field. - // The filter will check the request's headers against all the specified - // headers in the filter config. A match will happen if all the headers in the - // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). - repeated config.route.v3.HeaderMatcher headers = 4; - - // Faults are injected for the specified list of downstream hosts. If this - // setting is not set, faults are injected for all downstream nodes. - // Downstream node name is taken from :ref:`the HTTP - // x-envoy-downstream-service-node - // ` header and compared - // against downstream_nodes list. - repeated string downstream_nodes = 5; - - // The maximum number of faults that can be active at a single time via the configured fault - // filter. Note that because this setting can be overridden at the route level, it's possible - // for the number of active faults to be greater than this value (if injected via a different - // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. - // - // .. attention:: - // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy - // limit. It's possible for the number of active faults to rise slightly above the configured - // amount due to the implementation details. - google.protobuf.UInt32Value max_active_faults = 6; - - // The response rate limit to be applied to the response body of the stream. When configured, - // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent - // ` runtime key. - // - // .. attention:: - // This is a per-stream limit versus a connection level limit. This means that concurrent streams - // will each get an independent limit. 
- common.fault.v3.FaultRateLimit response_rate_limit = 7; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_delay_percent - string delay_percent_runtime = 8; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.abort_percent - string abort_percent_runtime = 9; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_duration_ms - string delay_duration_runtime = 10; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.http_status - string abort_http_status_runtime = 11; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.max_active_faults - string max_active_faults_runtime = 12; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.rate_limit.response_percent - string response_rate_limit_percent_runtime = 13; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.grpc_status - string abort_grpc_status_runtime = 14; - - // To control whether stats storage is allocated dynamically for each downstream server. - // If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. - // If set to false, dynamic stats storage will be allocated for the downstream cluster name. - // Default value is false. - bool disable_downstream_cluster_stats = 15; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto deleted file mode 100644 index 7e31da49e92ba..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_http1_bridge.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC HTTP/1.1 Bridge] -// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_http1_bridge] - -// gRPC HTTP/1.1 Bridge filter config. -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_http1_bridge.v2.Config"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto deleted file mode 100644 index 615fea923a8e1..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] -// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.grpc_http1_reverse_bridge] - -// gRPC reverse bridge filter configuration -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig"; - - // The content-type to pass to the upstream when the gRPC bridge filter is applied. - // The filter will also validate that the upstream responds with the same content type. - string content_type = 1 [(validate.rules).string = {min_len: 1}]; - - // If true, Envoy will assume that the upstream doesn't understand gRPC frames and - // strip the gRPC frame from the request, and add it back in to the response. This will - // hide the gRPC semantics from the upstream, allowing it to receive and respond with a - // simple binary encoded protobuf. 
In order to calculate the `Content-Length` header value, Envoy - // will buffer the upstream response unless :ref:`response_size_header - // ` - // is set, in which case Envoy will use the value of an upstream header to calculate the content - // length. - bool withhold_grpc_frames = 2; - - // When :ref:`withhold_grpc_frames - // ` - // is true, this option controls how Envoy calculates the `Content-Length`. When - // *response_size_header* is empty, Envoy will buffer the upstream response to calculate its - // size. When *response_size_header* is set to a non-empty string, Envoy will stream the response - // to the downstream and it will use the value of the response header with this name to set the - // `Content-Length` header and gRPC frame size. If the header with this name is repeated, only - // the first value will be used. - // - // Envoy will treat the upstream response as an error if this option is specified and the header - // is missing or if the value does not match the actual response body size. - string response_size_header = 3; -} - -// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. -message FilterConfigPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfigPerRoute"; - - // If true, disables gRPC reverse bridge filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto deleted file mode 100644 index a4feeff31f158..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ /dev/null @@ -1,235 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_json_transcoder.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3"; -option java_outer_classname = "TranscoderProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC-JSON transcoder] -// gRPC-JSON transcoder :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_json_transcoder] - -// [#next-free-field: 12] -// GrpcJsonTranscoder filter configuration. -// The filter itself can be used per route / per virtual host or on the general level. The most -// specific one is being used for a given route. If the list of services is empty - filter -// is considered to be disabled. -// Note that if specifying the filter per route, first the route is matched, and then transcoding -// filter is applied. It matters when specifying the route configuration and paths to match the -// request - for per-route grpc transcoder configs, the original path should be matched, while -// in other cases, the grpc-like path is expected (the one AFTER the filter is applied). 
-message GrpcJsonTranscoder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder"; - - enum UrlUnescapeSpec { - // URL path parameters will not decode RFC 6570 reserved characters. - // For example, segment `%2f%23/%20%2523` is unescaped to `%2f%23/ %23`. - ALL_CHARACTERS_EXCEPT_RESERVED = 0; - - // URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // For example, segment `%2f%23/%20%2523` is unescaped to `%2f#/ %23`. - ALL_CHARACTERS_EXCEPT_SLASH = 1; - - // URL path parameters will be fully URI-decoded. - // For example, segment `%2f%23/%20%2523` is unescaped to `/#/ %23`. - ALL_CHARACTERS = 2; - } - - message PrintOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder.PrintOptions"; - - // Whether to add spaces, line breaks and indentation to make the JSON - // output easy to read. Defaults to false. - bool add_whitespace = 1; - - // Whether to always print primitive fields. By default primitive - // fields with default values will be omitted in JSON output. For - // example, an int32 field set to 0 will be omitted. Setting this flag to - // true will override the default behavior and print primitive fields - // regardless of their values. Defaults to false. - bool always_print_primitive_fields = 2; - - // Whether to always print enums as ints. By default they are rendered - // as strings. Defaults to false. - bool always_print_enums_as_ints = 3; - - // Whether to preserve proto field names. By default protobuf will - // generate JSON field names using the ``json_name`` option, or lower camel case, - // in that order. Setting this flag will preserve the original field names. Defaults to false. 
- bool preserve_proto_field_names = 4; - } - - message RequestValidationOptions { - // By default, a request that cannot be mapped to any specified gRPC - // :ref:`services ` - // will pass-through this filter. - // When set to true, the request will be rejected with a ``HTTP 404 Not Found``. - bool reject_unknown_method = 1; - - // By default, a request with query parameters that cannot be mapped to the gRPC request message - // will pass-through this filter. - // When set to true, the request will be rejected with a ``HTTP 400 Bad Request``. - // - // The fields - // :ref:`ignore_unknown_query_parameters ` - // and - // :ref:`ignored_query_parameters ` - // have priority over this strict validation behavior. - bool reject_unknown_query_parameters = 2; - } - - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - // - // By default, the filter will pass through requests that do not map to any specified services. - // If the list of services is empty, filter is considered disabled. - // However, this behavior changes if - // :ref:`reject_unknown_method ` - // is enabled. - repeated string services = 2; - - // Control options for response JSON. These options are passed directly to - // `JsonPrintOptions `_. 
- PrintOptions print_options = 3; - - // Whether to keep the incoming request route after the outgoing headers have been transformed to - // the match the upstream gRPC service. Note: This means that routes for gRPC services that are - // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool match_incoming_request_route = 5; - - // A list of query parameters to be ignored for transcoding method mapping. - // By default, the transcoder filter will not transcode a request if there are any - // unknown/invalid query parameters. - // - // Example : - // - // .. code-block:: proto - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) { - // option (google.api.http) = { - // get: "/shelves/{shelf}" - // }; - // } - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable - // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow - // the same request to be mapped to ``GetShelf``. - repeated string ignored_query_parameters = 6; - - // Whether to route methods without the ``google.api.http`` option. - // - // Example : - // - // .. code-block:: proto - // - // package bookstore; - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) {} - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of - // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool auto_mapping = 7; - - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. 
- bool ignore_unknown_query_parameters = 8; - - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. - // - // For example, if an upstream server replies with headers: - // - // .. code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bool convert_grpc_status = 9; - - // URL unescaping policy. - // This spec is only applied when extracting variable with multiple segments. - // For example, in case of `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` and `z` are multiple segments. - // For a path with `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, `z=third/fourth`. - // If this setting is not specified, the value defaults to :ref:`ALL_CHARACTERS_EXCEPT_RESERVED`. - UrlUnescapeSpec url_unescape_spec = 10 [(validate.rules).enum = {defined_only: true}]; - - // Configure the behavior when handling requests that cannot be transcoded. 
- // - // By default, the transcoder will silently pass through HTTP requests that are malformed. - // This includes requests with unknown query parameters, unregister paths, etc. - // - // Set these options to enable strict HTTP request validation, resulting in the transcoder rejecting - // such requests with a ``HTTP 4xx``. See each individual option for more details on the validation. - // gRPC requests will still silently pass through without transcoding. - // - // The benefit is a proper error message to the downstream. - // If the upstream is a gRPC server, it cannot handle the passed-through HTTP requests and will reset - // the TCP connection. The downstream will then - // receive a ``HTTP 503 Service Unavailable`` due to the upstream connection reset. - // This incorrect error message may conflict with other Envoy components, such as retry policies. - RequestValidationOptions request_validation_options = 11; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto deleted file mode 100644 index 79ecb7a92b706..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_stats.v3; - -import "envoy/config/core/v3/grpc_method_list.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC statistics] gRPC statistics filter -// :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_stats] - -// gRPC statistics filter configuration -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_stats.v2alpha.FilterConfig"; - - // If true, the filter maintains a filter state object with the request and response message - // counts. - bool emit_filter_state = 1; - - oneof per_method_stat_specifier { - // If set, specifies an allowlist of service/methods that will have individual stats - // emitted for them. Any call that does not match the allowlist will be counted - // in a stat with no method specifier: `cluster..grpc.*`. - config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; - - // If set to true, emit stats for all service/method names. 
- // - // If set to false, emit stats for all service/message types to the same stats without including - // the service/method in the name, with prefix `cluster..grpc`. This can be useful if - // service/method granularity is not needed, or if each cluster only receives a single method. - // - // .. attention:: - // This option is only safe if all clients are trusted. If this option is enabled - // with untrusted clients, the clients could cause unbounded growth in the number of stats in - // Envoy, using unbounded memory and potentially slowing down stats pipelines. - // - // .. attention:: - // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the - // behavior will default to `stats_for_all_methods=false`. This default value is changed due - // to the previous value being deprecated. This behavior can be changed with runtime override - // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. - google.protobuf.BoolValue stats_for_all_methods = 3; - } - - // If true, the filter will gather a histogram for the request time of the upstream. - // It works with :ref:`stats_for_all_methods - // ` - // and :ref:`individual_method_stats_allowlist - // ` the same way - // request_message_count and response_message_count works. - bool enable_upstream_stats = 4; -} - -// gRPC statistics filter state object in protobuf form. -message FilterObject { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_stats.v2alpha.FilterObject"; - - // Count of request messages in the request stream. - uint64 request_message_count = 1; - - // Count of response messages in the response stream. 
- uint64 response_message_count = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto deleted file mode 100644 index 8161139f547b5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_web.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3"; -option java_outer_classname = "GrpcWebProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Web] -// gRPC Web :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_web] - -// gRPC Web filter config. -message GrpcWeb { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_web.v2.GrpcWeb"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD deleted file mode 100644 index bfe5d198e6129..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/http/compressor/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto deleted file mode 100644 index a931ab78689ff..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.gzip.v3; - -import "envoy/extensions/filters/http/compressor/v3/compressor.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v3"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Gzip] - -// [#next-free-field: 12] -message Gzip { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.gzip.v2.Gzip"; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } - - message CompressionLevel { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.gzip.v2.Gzip.CompressionLevel"; - - enum Enum { - DEFAULT = 0; - BEST = 1; - SPEED = 2; - } - } - - reserved 2, 6, 7, 8; - - reserved "content_length", "content_type", "disable_on_etag_header", - "remove_accept_encoding_header"; - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. 
- google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST" provides higher compression at the cost of - // higher latency, "SPEED" provides lower compression with minimum impact on response time. - // "DEFAULT" provides an optimal result between speed and compression. This field will be set to - // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though - // there are situations which changing this parameter might produce better results. For example, - // run-length encoding (RLE) is typically used when the content is known for having sequences - // which same data occurs many consecutive times. For more information about each strategy, please - // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Set of configuration parameters common for all compression filters. You can define - // `content_length`, `content_type` and other parameters in this field. - compressor.v3.Compressor compressor = 10; - - // Value for Zlib's next output buffer. If not set, defaults to 4096. - // See https://www.zlib.net/manual.html for more details. 
Also see - // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. - google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD deleted file mode 100644 index 693f0b92ff34d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto deleted file mode 100644 index 5e399790a7eca..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ /dev/null @@ -1,132 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.header_to_metadata.v3; - -import "envoy/type/matcher/v3/regex.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3"; -option java_outer_classname = "HeaderToMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Header-To-Metadata Filter] -// -// The configuration for transforming headers into metadata. 
This is useful -// for matching load balancer subsets, logging, etc. -// -// Header to Metadata :ref:`configuration overview `. -// [#extension: envoy.filters.http.header_to_metadata] - -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.header_to_metadata.v2.Config"; - - enum ValueType { - STRING = 0; - - NUMBER = 1; - - // The value is a serialized `protobuf.Value - // `_. - PROTOBUF_VALUE = 2; - } - - // ValueEncode defines the encoding algorithm. - enum ValueEncode { - // The value is not encoded. - NONE = 0; - - // The value is encoded in `Base64 `_. - // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the - // non-ASCII characters in the header. - BASE64 = 1; - } - - // [#next-free-field: 7] - message KeyValuePair { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; - - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_len: 1}]; - - // The value to pair with the given key. - // - // When used for a - // :ref:`on_header_present ` - // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. - // - // When used for a :ref:`on_header_missing ` - // case, a non-empty value must be provided otherwise no metadata is added. - string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; - - // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value - // is used as-is. - // - // This is only used for :ref:`on_header_present `. - // - // Note: if the `value` field is non-empty this field should be empty. 
- type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 - [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; - - // The value's type — defaults to string. - ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; - - // How is the value encoded, default is NONE (not encoded). - // The value will be decoded accordingly before storing to metadata. - ValueEncode encode = 5; - } - - // A Rule defines what metadata to apply when a header is present or missing. - // [#next-free-field: 6] - message Rule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; - - // Specifies that a match will be performed on the value of a header or a cookie. - // - // The header to be extracted. - string header = 1 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" - ]; - - // The cookie to be extracted. - string cookie = 5 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" - ]; - - // If the header or cookie is present, apply this metadata KeyValuePair. - // - // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header or cookie value. - KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; - - // If the header or cookie is not present, apply this metadata KeyValuePair. - // - // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header or cookie value. - KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; - - // Whether or not to remove the header after a rule is applied. - // - // This prevents headers from leaking. - // This field is not supported in case of a cookie. 
- bool remove = 4; - } - - // The list of rules to apply to requests. - repeated Rule request_rules = 1; - - // The list of rules to apply to responses. - repeated Rule response_rules = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD deleted file mode 100644 index c6ef74063aabe..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto deleted file mode 100644 index f3a0c42c388c6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.health_check.v3; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v3"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health check] -// Health check :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.health_check] - -// [#next-free-field: 6] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.health_check.v2.HealthCheck"; - - reserved 2; - - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; - - // If operating in pass through mode, the amount of time in milliseconds - // that the filter should cache the upstream response. - google.protobuf.Duration cache_time = 3; - - // If operating in non-pass-through mode, specifies a set of upstream cluster - // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. - // - // .. note:: - // - // This value is interpreted as an integer by truncating, so 12.50% will be calculated - // as if it were 12%. - map cluster_min_healthy_percentages = 4; - - // Specifies a set of health check request headers to match on. The health check filter will - // check a request’s headers against all the specified headers. To specify the health check - // endpoint, set the ``:path`` header to match on. - repeated config.route.v3.HeaderMatcher headers = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto deleted file mode 100644 index a23ad9dea0a90..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ip_tagging.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3"; -option java_outer_classname = "IpTaggingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: IP tagging] -// IP tagging :ref:`configuration overview `. -// [#extension: envoy.filters.http.ip_tagging] - -message IPTagging { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ip_tagging.v2.IPTagging"; - - // The type of requests the filter should apply to. The supported types - // are internal, external or both. The - // :ref:`x-forwarded-for` header is - // used to determine if a request is internal and will result in - // :ref:`x-envoy-internal` - // being set. The filter defaults to both, and it will apply to all request types. - enum RequestType { - // Both external and internal requests will be tagged. This is the default value. - BOTH = 0; - - // Only internal requests will be tagged. - INTERNAL = 1; - - // Only external requests will be tagged. - EXTERNAL = 2; - } - - // Supplies the IP tag name and the IP address subnets. 
- message IPTag { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ip_tagging.v2.IPTagging.IPTag"; - - // Specifies the IP tag name to apply. - string ip_tag_name = 1; - - // A list of IP address subnets that will be tagged with - // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated config.core.v3.CidrRange ip_list = 2; - } - - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. - // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] - // The set of IP tags for the filter. - repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD deleted file mode 100644 index 6eb33fe8151ad..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto deleted file mode 100644 index 9718dbe0550ab..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ /dev/null @@ -1,678 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.jwt_authn.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: JWT Authentication] -// JWT Authentication :ref:`configuration overview `. -// [#extension: envoy.filters.http.jwt_authn] - -// Please see following for JWT authentication flow: -// -// * `JSON Web Token (JWT) `_ -// * `The OAuth 2.0 Authorization Framework `_ -// * `OpenID Connect `_ -// -// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: -// -// * issuer: the principal that issues the JWT. If specified, it has to match the *iss* field in JWT. -// * allowed audiences: the ones in the token have to be listed here. -// * how to fetch public key JWKS to verify the token signature. -// * how to extract JWT token in the request. 
-// * how to pass successfully verified token payload. -// -// Example: -// -// .. code-block:: yaml -// -// issuer: https://example.com -// audiences: -// - bookstore_android.apps.googleusercontent.com -// - bookstore_web.apps.googleusercontent.com -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// cache_duration: -// seconds: 300 -// -// [#next-free-field: 14] -message JwtProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; - - // Specify the `principal `_ that issued - // the JWT, usually a URL or an email address. - // - // It is optional. If specified, it has to match the *iss* field in JWT. - // - // If a JWT has *iss* field and this field is specified, they have to match, otherwise the - // JWT *iss* field is not checked. - // - // Note: *JwtRequirement* :ref:`allow_missing ` - // and :ref:`allow_missing_or_failed ` - // are implemented differently than other *JwtRequirements*. Hence the usage of this field - // is different as follows if *allow_missing* or *allow_missing_or_failed* is used: - // - // * If a JWT has *iss* field, it needs to be specified by this field in one of *JwtProviders*. - // * If a JWT doesn't have *iss* field, one of *JwtProviders* should fill this field empty. - // * Multiple *JwtProviders* should not have same value in this field. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - // - string issuer = 1; - - // The list of JWT `audiences `_ are - // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, - // will not check audiences in the token. - // - // Example: - // - // .. 
code-block:: yaml - // - // audiences: - // - bookstore_android.apps.googleusercontent.com - // - bookstore_web.apps.googleusercontent.com - // - repeated string audiences = 2; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; - - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - config.core.v3.DataSource local_jwks = 4; - } - - // If false, the JWT is removed in the request after a success verification. If true, the JWT is - // not removed in the request. Default value is false. - bool forward = 5; - - // Two fields below define where to extract the JWT from an HTTP request. - // - // If no explicit location is specified, the following default locations are tried in order: - // - // 1. The Authorization header using the `Bearer schema - // `_. Example:: - // - // Authorization: Bearer . - // - // 2. `access_token `_ query parameter. - // - // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations - // its provider specified or from the default locations. - // - // Specify the HTTP headers to extract JWT token. For examples, following config: - // - // .. 
code-block:: yaml - // - // from_headers: - // - name: x-goog-iap-jwt-assertion - // - // can be used to extract token from header:: - // - // ``x-goog-iap-jwt-assertion: ``. - // - repeated JwtHeader from_headers = 6; - - // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_params: - // - jwt_token - // - // The JWT format in query parameter is:: - // - // /path?jwt_token= - // - repeated string from_params = 7; - - // JWT is sent in a cookie. `from_cookies` represents the cookie names to extract from. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_cookies: - // - auth-token - // - // Then JWT will be extracted from `auth-token` cookie in the request. - // - repeated string from_cookies = 13; - - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - string forward_payload_header = 8 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When :ref:`forward_payload_header ` - // is specified, the base64 encoded payload will be added to the headers. - // Normally JWT based64 encode doesn't add padding. If this field is true, - // the header will be padded. - // - // This field is only relevant if :ref:`forward_payload_header ` - // is specified. - bool pad_forward_payload_header = 11; - - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. 
- // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; - - // Specify the clock skew in seconds when verifying JWT time constraint, - // such as `exp`, and `nbf`. If not specified, default is 60 seconds. - uint32 clock_skew_seconds = 10; - - // Enables JWT cache, its size is specified by *jwt_cache_size*. - // Only valid JWT tokens are cached. - JwtCacheConfig jwt_cache_config = 12; -} - -// This message specifies JWT Cache configuration. -message JwtCacheConfig { - // The unit is number of JWT tokens, default to 100. - uint32 jwt_cache_size = 1; -} - -// This message specifies how to fetch JWKS from remote and how to cache it. -message RemoteJwks { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.RemoteJwks"; - - // The HTTP URI to fetch the JWKS. For example: - // - // .. code-block:: yaml - // - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // - config.core.v3.HttpUri http_uri = 1; - - // Duration after which the cached JWKS should be expired. If not specified, default cache - // duration is 5 minutes. - google.protobuf.Duration cache_duration = 2; - - // Fetch Jwks asynchronously in the main thread before the listener is activated. - // Fetched Jwks can be used by all worker threads. - // - // If this feature is not enabled: - // - // * The Jwks is fetched on-demand when the requests come. During the fetching, first - // few requests are paused until the Jwks is fetched. - // * Each worker thread fetches its own Jwks since Jwks cache is per worker thread. - // - // If this feature is enabled: - // - // * Fetched Jwks is done in the main thread before the listener is activated. 
Its fetched - // Jwks can be used by all worker threads. Each worker thread doesn't need to fetch its own. - // * Jwks is ready when the requests come, not need to wait for the Jwks fetching. - // - JwksAsyncFetch async_fetch = 3; - - // Retry policy for fetching Jwks. optional. turned off by default. - // - // For example: - // - // .. code-block:: yaml - // - // retry_policy: - // retry_back_off: - // base_interval: 0.01s - // max_interval: 20s - // num_retries: 10 - // - // will yield a randomized truncated exponential backoff policy with an initial delay of 10ms - // 10 maximum attempts spaced at most 20s seconds. - // - // .. code-block:: yaml - // - // retry_policy: - // num_retries:1 - // - // uses the default :ref:`retry backoff strategy `. - // with the default base interval is 1000 milliseconds. and the default maximum interval of 10 times the base interval. - // - // if num_retries is omitted, the default is to allow only one retry. - // - // - // If enabled, the retry policy will apply to all Jwks fetching approaches, e.g. on demand or asynchronously in background. - // - // - config.core.v3.RetryPolicy retry_policy = 4; -} - -// Fetch Jwks asynchronously in the main thread when the filter config is parsed. -// The listener is activated only after the Jwks is fetched. -// When the Jwks is expired in the cache, it is fetched again in the main thread. -// The fetched Jwks from the main thread can be used by all worker threads. -message JwksAsyncFetch { - // If false, the listener is activated after the initial fetch is completed. - // The initial fetch result can be either successful or failed. - // If true, it is activated without waiting for the initial fetch to complete. - // Default is false. - bool fast_listener = 1; -} - -// This message specifies a header location to extract JWT token. 
-message JwtHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader"; - - // The HTTP header name. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The value prefix. The value format is "value_prefix" - // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the - // end. - string value_prefix = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; -} - -// Specify a required provider with audiences. -message ProviderWithAudiences { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.ProviderWithAudiences"; - - // Specify a required provider name. - string provider_name = 1; - - // This field overrides the one specified in the JwtProvider. - repeated string audiences = 2; -} - -// This message specifies a Jwt requirement. An empty message means JWT verification is not -// required. Here are some config examples: -// -// .. 
code-block:: yaml -// -// # Example 1: not required with an empty message -// -// # Example 2: require A -// provider_name: provider-A -// -// # Example 3: require A or B -// requires_any: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 4: require A and B -// requires_all: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 5: require A and (B or C) -// requires_all: -// requirements: -// - provider_name: provider-A -// - requires_any: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 6: require A or (B and C) -// requires_any: -// requirements: -// - provider_name: provider-A -// - requires_all: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows -// missing token.) -// requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// -// # Example 8: A is optional and B is required. -// requires_all: -// requirements: -// - requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// - provider_name: provider-B -// -// [#next-free-field: 7] -message JwtRequirement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirement"; - - oneof requires_type { - // Specify a required provider name. - string provider_name = 1; - - // Specify a required provider with audiences. - ProviderWithAudiences provider_and_audiences = 2; - - // Specify list of JwtRequirement. Their results are OR-ed. - // If any one of them passes, the result is passed. - JwtRequirementOrList requires_any = 3; - - // Specify list of JwtRequirement. Their results are AND-ed. - // All of them must pass, if one of them fails or missing, it fails. 
- JwtRequirementAndList requires_all = 4; - - // The requirement is always satisfied even if JWT is missing or the JWT - // verification fails. A typical usage is: this filter is used to only verify - // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWT tokens will be verified. - google.protobuf.Empty allow_missing_or_failed = 5; - - // The requirement is satisfied if JWT is missing, but failed if JWT is - // presented but invalid. Similar to allow_missing_or_failed, this is used - // to only verify JWTs and pass the verified payload to another filter. The - // different is this mode will reject requests with invalid tokens. - google.protobuf.Empty allow_missing = 6; - } -} - -// This message specifies a list of RequiredProvider. -// Their results are OR-ed; if any one of them passes, the result is passed -message JwtRequirementOrList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementOrList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a list of RequiredProvider. -// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. -message JwtRequirementAndList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementAndList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a Jwt requirement for a specific Route condition. -// Example 1: -// -// .. code-block:: yaml -// -// - match: -// prefix: /healthz -// -// In above example, "requires" field is empty for /healthz prefix match, -// it means that requests matching the path prefix don't require JWT authentication. -// -// Example 2: -// -// .. 
code-block:: yaml -// -// - match: -// prefix: / -// requires: { provider_name: provider-A } -// -// In above example, all requests matched the path prefix require jwt authentication -// from "provider-A". -message RequirementRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.RequirementRule"; - - // The route matching parameter. Only when the match is satisfied, the "requires" field will - // apply. - // - // For example: following match will match all requests. - // - // .. code-block:: yaml - // - // match: - // prefix: / - // - config.route.v3.RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Specify a Jwt requirement. - // If not specified, Jwt verification is disabled. - oneof requirement_type { - // Specify a Jwt requirement. Please see detail comment in message JwtRequirement. - JwtRequirement requires = 2; - - // Use requirement_name to specify a Jwt requirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. - string requirement_name = 3 [(validate.rules).string = {min_len: 1}]; - } -} - -// This message specifies Jwt requirements based on stream_info.filterState. -// This FilterState should use `Router::StringAccessor` object to set a string value. -// Other HTTP filters can use it to specify Jwt requirements dynamically. -// -// Example: -// -// .. code-block:: yaml -// -// name: jwt_selector -// requires: -// issuer_1: -// provider_name: issuer1 -// issuer_2: -// provider_name: issuer2 -// -// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, -// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. -message FilterStateRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule"; - - // The filter state name to retrieve the `Router::StringAccessor` object. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A map of string keys to requirements. The string key is the string value - // in the FilterState with the name specified in the *name* field above. - map requires = 3; -} - -// This is the Envoy HTTP filter config for JWT authentication. -// -// For example: -// -// .. code-block:: yaml -// -// providers: -// provider1: -// issuer: issuer1 -// audiences: -// - audience1 -// - audience2 -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// provider2: -// issuer: issuer2 -// local_jwks: -// inline_string: jwks_string -// -// rules: -// # Not jwt verification is required for /health path -// - match: -// prefix: /health -// -// # Jwt verification for provider1 is required for path prefixed with "prefix" -// - match: -// prefix: /prefix -// requires: -// provider_name: provider1 -// -// # Jwt verification for either provider1 or provider2 is required for all other requests. -// - match: -// prefix: / -// requires: -// requires_any: -// requirements: -// - provider_name: provider1 -// - provider_name: provider2 -// -// [#next-free-field: 6] -message JwtAuthentication { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication"; - - // Map of provider names to JwtProviders. - // - // .. code-block:: yaml - // - // providers: - // provider1: - // issuer: issuer1 - // audiences: - // - audience1 - // - audience2 - // remote_jwks: - // http_uri: - // uri: https://example.com/.well-known/jwks.json - // cluster: example_jwks_cluster - // timeout: 1s - // provider2: - // issuer: provider2 - // local_jwks: - // inline_string: jwks_string - // - map providers = 1; - - // Specifies requirements based on the route matches. The first matched requirement will be - // applied. If there are overlapped match conditions, please put the most specific match first. 
- // - // Examples - // - // .. code-block:: yaml - // - // rules: - // - match: - // prefix: /healthz - // - match: - // prefix: /baz - // requires: - // provider_name: provider1 - // - match: - // prefix: /foo - // requires: - // requires_any: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - match: - // prefix: /bar - // requires: - // requires_all: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - repeated RequirementRule rules = 2; - - // This message specifies Jwt requirements based on stream_info.filterState. - // Other HTTP filters can use it to specify Jwt requirements dynamically. - // The *rules* field above is checked first, if it could not find any matches, - // check this one. - FilterStateRule filter_state_rules = 3; - - // When set to true, bypass the `CORS preflight request - // `_ regardless of JWT - // requirements specified in the rules. - bool bypass_cors_preflight = 4; - - // A map of unique requirement_names to JwtRequirements. - // :ref:`requirement_name ` - // in `PerRouteConfig` uses this map to specify a JwtRequirement. - map requirement_map = 5; -} - -// Specify per-route config. -message PerRouteConfig { - oneof requirement_specifier { - option (validate.required) = true; - - // Disable Jwt Authentication for this route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Use requirement_name to specify a JwtRequirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. If no, the requests using this route will be rejected with 403. 
- string requirement_name = 2 [(validate.rules).string = {min_len: 1}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD deleted file mode 100644 index 9a76b7e148e03..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto b/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto deleted file mode 100644 index a0a23b0de3a34..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.kill_request.v3; - -import "envoy/type/v3/percent.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.kill_request.v3"; -option java_outer_classname = "KillRequestProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Kill Request] -// Kill Request :ref:`configuration overview `. -// [#extension: envoy.filters.http.kill_request] - -// Configuration for KillRequest filter. -message KillRequest { - // On which direction should the filter check for the `kill_request_header`. - // Default to `REQUEST` if unspecified. - enum Direction { - REQUEST = 0; - RESPONSE = 1; - } - - // The probability that a Kill request will be triggered. 
- type.v3.FractionalPercent probability = 1; - - // The name of the kill request header. If this field is not empty, it will override the :ref:`default header ` name. Otherwise the default header name will be used. - string kill_request_header = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - Direction direction = 3 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD deleted file mode 100644 index 6c58a43e4ff6b..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto deleted file mode 100644 index 1cf6c5f2fa52c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto +++ /dev/null @@ -1,109 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.local_ratelimit.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; -import "envoy/type/v3/http_status.proto"; -import "envoy/type/v3/token_bucket.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3"; -option java_outer_classname = 
"LocalRateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Local Rate limit] -// Local Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.local_ratelimit] - -// [#next-free-field: 12] -message LocalRateLimit { - // The human readable prefix to use when emitting stats. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // This field allows for a custom HTTP response status code to the downstream client when - // the request has been rate limited. - // Defaults to 429 (TooManyRequests). - // - // .. note:: - // If this is set to < 400, 429 will be used instead. - type.v3.HttpStatus status = 2; - - // The token bucket configuration to use for rate limiting requests that are processed by this - // filter. Each request processed by the filter consumes a single token. If the token is available, - // the request will be allowed. If no tokens are available, the request will receive the configured - // rate limit status. - // - // .. note:: - // It's fine for the token bucket to be unset for the global configuration since the rate limit - // can be applied at a the virtual host or route level. Thus, the token bucket must be set - // for the per route configuration otherwise the config will be rejected. - // - // .. note:: - // When using per route configuration, the bucket becomes unique to that route. - // - // .. note:: - // In the current implementation the token bucket's :ref:`fill_interval - // ` must be >= 50ms to avoid too aggressive - // refills. - type.v3.TokenBucket token_bucket = 3; - - // If set, this will enable -- but not necessarily enforce -- the rate limit for the given - // fraction of requests. - // Defaults to 0% of requests for safety. - config.core.v3.RuntimeFractionalPercent filter_enabled = 4; - - // If set, this will enforce the rate limit decisions for the given fraction of requests. 
- // - // Note: this only applies to the fraction of enabled requests. - // - // Defaults to 0% of requests for safety. - config.core.v3.RuntimeFractionalPercent filter_enforced = 5; - - // Specifies a list of HTTP headers that should be added to each request that - // has been rate limited and is also forwarded upstream. This can only occur when the - // filter is enabled but not enforced. - repeated config.core.v3.HeaderValueOption request_headers_to_add_when_not_enforced = 10 - [(validate.rules).repeated = {max_items: 10}]; - - // Specifies a list of HTTP headers that should be added to each response for requests that - // have been rate limited. This occurs when the filter is either enabled or fully enforced. - repeated config.core.v3.HeaderValueOption response_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 10}]; - - // The rate limit descriptor list to use in the local rate limit to override - // on. The rate limit descriptor is selected by the first full match from the - // request descriptors. - // - // Example on how to use ::ref:`this ` - // - // .. note:: - // - // In the current implementation the descriptor's token bucket :ref:`fill_interval - // ` must be a multiple - // global :ref:`token bucket's` fill interval. - // - // The descriptors must match verbatim for rate limiting to apply. There is no partial - // match by a subset of descriptor entries in the current implementation. - repeated common.ratelimit.v3.LocalRateLimitDescriptor descriptors = 8; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 9 [(validate.rules).uint32 = {lte: 10}]; - - // Specifies the scope of the rate limiter's token bucket. - // If set to false, the token bucket is shared across all worker threads, - // thus the rate limits are applied per Envoy process. 
- // If set to true, a token bucket is allocated for each connection. - // Thus the rate limits are applied per connection thereby allowing - // one to rate limit requests on a per connection basis. - // If unspecified, the default value is false. - bool local_rate_limit_per_downstream_connection = 11; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto deleted file mode 100644 index 1636c01ab1c75..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.lua.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.lua.v3"; -option java_outer_classname = "LuaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Lua] -// Lua :ref:`configuration overview `. -// [#extension: envoy.filters.http.lua] - -message Lua { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.lua.v2.Lua"; - - // The Lua code that Envoy will execute. 
This can be a very small script that - // further loads code from disk if desired. Note that if JSON configuration is used, the code must - // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line - // strings so complex scripts can be easily expressed inline in the configuration. - string inline_code = 1 [(validate.rules).string = {min_len: 1}]; - - // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute - // `. The Lua source codes can be - // loaded from inline string or local files. - // - // Example: - // - // .. code-block:: yaml - // - // source_codes: - // hello.lua: - // inline_string: | - // function envoy_on_response(response_handle) - // -- Do something. - // end - // world.lua: - // filename: /etc/lua/world.lua - // - map source_codes = 2; -} - -message LuaPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the Lua filter for this particular vhost or route. If disabled is specified in - // multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // A name of a Lua source code stored in - // :ref:`Lua.source_codes `. - string name = 2 [(validate.rules).string = {min_len: 1}]; - - // A configured per-route Lua source code that can be served by RDS or provided inline. - config.core.v3.DataSource source_code = 3; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD deleted file mode 100644 index 75d36b709935c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto b/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto deleted file mode 100644 index 27e709f7a8d6c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.on_demand.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.on_demand.v3"; -option java_outer_classname = "OnDemandProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OnDemand] -// IP tagging :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.on_demand] - -message OnDemand { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.on_demand.v2.OnDemand"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto b/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto deleted file mode 100644 index ca752b4c75ce6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.original_src.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.original_src.v3"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. - -// The Original Src filter binds upstream connections to the original source address determined -// for the request. This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. 
-// [#extension: envoy.filters.http.original_src] -message OriginalSrc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.original_src.v2alpha1.OriginalSrc"; - - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. - uint32 mark = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD deleted file mode 100644 index 0bad14913d217..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto deleted file mode 100644 index bc58e7f9b2e1a..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ /dev/null @@ -1,122 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ratelimit.v3; - -import "envoy/config/ratelimit/v3/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status 
= ACTIVE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.ratelimit] - -// [#next-free-field: 10] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.rate_limit.v2.RateLimit"; - - // Defines the version of the standard to use for X-RateLimit headers. - enum XRateLimitHeadersRFCVersion { - // X-RateLimit headers disabled. - OFF = 0; - - // Use `draft RFC Version 03 `_. - DRAFT_VERSION_03 = 1; - } - - // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The type of requests the filter should apply to. The supported - // types are *internal*, *external* or *both*. A request is considered internal if - // :ref:`x-envoy-internal` is set to true. If - // :ref:`x-envoy-internal` is not set or false, a - // request is considered external. The filter defaults to *both*, and it will apply to all request - // types. - string request_type = 3 - [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - bool failure_mode_deny = 5; - - // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. 
The - // HTTP code will be 200 for a gRPC response. - bool rate_limited_as_resource_exhausted = 6; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message = {required: true}]; - - // Defines the standard version to use for X-RateLimit headers emitted by the filter: - // - // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the - // client in the current time-window followed by the description of the - // quota policy. The values are returned by the rate limiting service in - // :ref:`current_limit` - // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. - // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the - // current time-window. The values are returned by the rate limiting service - // in :ref:`limit_remaining` - // field. - // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of - // the current time-window. The values are returned by the rate limiting service - // in :ref:`duration_until_reset` - // field. - // - // In case rate limiting policy specifies more then one time window, the values - // above represent the window that is closest to reaching its limit. - // - // For more information about the headers specification see selected version of - // the `draft RFC `_. - // - // Disabled by default. - XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 - [(validate.rules).enum = {defined_only: true}]; - - // Disables emitting the :ref:`x-envoy-ratelimited` header - // in case of rate limiting (i.e. 429 responses). - // Having this header not present potentially makes the request retriable. - bool disable_x_envoy_ratelimited_header = 9; -} - -message RateLimitPerRoute { - enum VhRateLimitsOptions { - // Use the virtual host rate limits unless the route has a rate limit policy. 
- OVERRIDE = 0; - - // Use the virtual host rate limits even if the route has a rate limit policy. - INCLUDE = 1; - - // Ignore the virtual host rate limits even if the route does not have a rate limit policy. - IGNORE = 2; - } - - // Specifies if the rate limit filter should include the virtual host rate limits. - VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD deleted file mode 100644 index fd183569e5a1e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto deleted file mode 100644 index 7ad7ac5e6aa25..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.rbac.v3; - -import "envoy/config/rbac/v3/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v3"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.http.rbac] - -// RBAC filter config. 
-message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.rbac.v2.RBAC"; - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. - config.rbac.v3.RBAC rules = 1; - - // Shadow rules are not enforced by the filter (i.e., returning a 403) - // but will emit stats and logs and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v3.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 3; -} - -message RBACPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.rbac.v2.RBACPerRoute"; - - reserved 1; - - // Override the global configuration of the filter with this new config. - // If absent, the global RBAC policy will be disabled for this route. - RBAC rbac = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD deleted file mode 100644 index 0b02b988e42ff..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto deleted file mode 100644 index ce595c057c01f..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.router.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v3"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Router :ref:`configuration overview `. -// [#extension: envoy.filters.http.router] - -// [#next-free-field: 8] -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.router.v2.Router"; - - // Whether the router generates dynamic cluster statistics. Defaults to - // true. Can be disabled in high performance scenarios. - google.protobuf.BoolValue dynamic_stats = 1; - - // Whether to start a child span for egress routed calls. This can be - // useful in scenarios where other filters (auth, ratelimit, etc.) make - // outbound calls and have child spans rooted at the same ingress - // parent. Defaults to false. - bool start_child_span = 2; - - // Configuration for HTTP upstream logs emitted by the router. 
Upstream logs - // are configured in the same way as access logs, but each log entry represents - // an upstream request. Presuming retries are configured, multiple upstream - // requests may be made for each downstream (inbound) request. - repeated config.accesslog.v3.AccessLog upstream_log = 3; - - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers - // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. - bool suppress_envoy_headers = 4; - - // Specifies a list of HTTP headers to strictly validate. Envoy will reject a - // request and respond with HTTP status 400 if the request contains an invalid - // value for any of the headers listed in this field. Strict header checking - // is only supported for the following headers: - // - // Value must be a ','-delimited list (i.e. no spaces) of supported retry - // policy values: - // - // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` - // * :ref:`config_http_filters_router_x-envoy-retry-on` - // - // Value must be an integer: - // - // * :ref:`config_http_filters_router_x-envoy-max-retries` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated = { - items { - string { - in: "x-envoy-upstream-rq-timeout-ms" - in: "x-envoy-upstream-rq-per-try-timeout-ms" - in: "x-envoy-max-retries" - in: "x-envoy-retry-grpc-on" - in: "x-envoy-retry-on" - } - } - }]; - - // If not set, ingress Envoy will ignore - // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress - // Envoy, when deriving timeout for upstream cluster. - bool respect_expected_rq_timeout = 6; - - // If set, Envoy will avoid incrementing HTTP failure code stats - // on gRPC requests. 
This includes the individual status code value - // (e.g. upstream_rq_504) and group stats (e.g. upstream_rq_5xx). - // This field is useful if interested in relying only on the gRPC - // stats filter to define success and failure metrics for gRPC requests - // as not all failed gRPC requests charge HTTP status code metrics. See - // :ref:`gRPC stats filter` documentation - // for more details. - bool suppress_grpc_request_failure_code_stats = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto deleted file mode 100644 index f7ff348e20255..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.set_metadata.v3; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.set_metadata.v3"; -option java_outer_classname = "SetMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Set-Metadata Filter] -// -// This filters adds or updates dynamic metadata with static data. 
-// -// [#extension: envoy.filters.http.set_metadata] - -message Config { - // The metadata namespace. - string metadata_namespace = 1 [(validate.rules).string = {min_len: 1}]; - - // The value to update the namespace with. See - // :ref:`the filter documentation ` for - // more information on how this value is merged with potentially existing - // ones. - google.protobuf.Struct value = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD deleted file mode 100644 index 6b2b1215048c6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto b/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto deleted file mode 100644 index 81779443e4a54..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.tap.v3; - -import "envoy/extensions/common/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap] -// Tap :ref:`configuration overview `. -// [#extension: envoy.filters.http.tap] - -// Top level configuration for the tap filter. 
-message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.tap.v2alpha.Tap"; - - // Common configuration for the HTTP tap filter. - common.tap.v3.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto deleted file mode 100644 index a0cfcae1afb57..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// [#extension: envoy.filters.http.wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. 
- envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto b/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto deleted file mode 100644 index cb439b0973ba9..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.http_inspector.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3"; -option java_outer_classname = "HttpInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Inspector Filter] -// Detect whether the application protocol is HTTP. 
-// [#extension: envoy.filters.listener.http_inspector] - -message HttpInspector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.http_inspector.v2.HttpInspector"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto b/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto deleted file mode 100644 index 8239c5c42c528..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.original_dst.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3"; -option java_outer_classname = "OriginalDstProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Original Dst Filter] -// Use the Original destination address on downstream connections. 
-// [#extension: envoy.filters.listener.original_dst] - -message OriginalDst { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.original_dst.v2.OriginalDst"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto b/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto deleted file mode 100644 index aa0603cdff47d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.original_src.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_src.v3"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. -// [#extension: envoy.filters.listener.original_src] - -// The Original Src filter binds upstream connections to the original source address determined -// for the connection. 
This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. -message OriginalSrc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc"; - - // Whether to bind the port to the one used in the original downstream connection. - // [#not-implemented-hide:] - bool bind_port = 1; - - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. - uint32 mark = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto deleted file mode 100644 index fb8047d391e95..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.proxy_protocol.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Proxy Protocol Filter] -// PROXY protocol listener filter. -// [#extension: envoy.filters.listener.proxy_protocol] - -message ProxyProtocol { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; - - message KeyValuePair { - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - - // A Rule defines what metadata to apply when a header is present or missing. - message Rule { - // The type that triggers the rule - required - // TLV type is defined as uint8_t in proxy protocol. See `the spec - // `_ for details. - uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; - - // If the TLV type is present, apply this metadata KeyValuePair. 
- KeyValuePair on_tlv_present = 2; - } - - // The list of rules to apply to requests. - repeated Rule rules = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto b/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto deleted file mode 100644 index eff9774844f4b..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.tls_inspector.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3"; -option java_outer_classname = "TlsInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: TLS Inspector Filter] -// Allows detecting whether the transport appears to be TLS or plaintext. 
-// [#extension: envoy.filters.listener.tls_inspector] - -message TlsInspector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.tls_inspector.v2.TlsInspector"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto deleted file mode 100644 index 2ed14c7f0e237..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.client_ssl_auth.v3; - -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3"; -option java_outer_classname = "ClientSslAuthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Client TLS authentication] -// Client TLS authentication -// :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.client_ssl_auth] - -message ClientSSLAuth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.client_ssl_auth.v2.ClientSSLAuth"; - - // The :ref:`cluster manager ` cluster that runs - // the authentication service. The filter will connect to the service every 60s to fetch the list - // of principals. The service must support the expected :ref:`REST API - // `. - string auth_api_cluster = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // Time in milliseconds between principal refreshes from the - // authentication service. Default is 60000 (60s). The actual fetch time - // will be this value plus a random jittered value between - // 0-refresh_delay_ms milliseconds. - google.protobuf.Duration refresh_delay = 3; - - // An optional list of IP address and subnet masks that should be white - // listed for access by the filter. If no list is provided, there is no - // IP allowlist. - repeated config.core.v3.CidrRange ip_white_list = 4 - [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto deleted file mode 100644 index ccd30aaba6922..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.connection_limit.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.connection_limit.v3"; -option java_outer_classname = "ConnectionLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Connection limit] -// Connection limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.connection_limit] - -message ConnectionLimit { - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The max connections configuration to use for new incoming connections that are processed - // by the filter's filter chain. When max_connection is reached, the incoming connection - // will be closed after delay duration. - google.protobuf.UInt64Value max_connections = 2 [(validate.rules).uint64 = {gte: 1}]; - - // The delay configuration to use for rejecting the connection after some specified time duration - // instead of immediately rejecting the connection. 
That way, a malicious user is not able to - // retry as fast as possible which provides a better DoS protection for Envoy. If this is not present, - // the connection will be closed immediately. - google.protobuf.Duration delay = 3; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto deleted file mode 100644 index 2742372b2f91d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.direct_response.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.direct_response.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Direct response] -// Direct response :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.direct_response] - -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.direct_response.v2.Config"; - - // Response data as a data source. - config.core.v3.DataSource response = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto deleted file mode 100644 index fa1959a425c8e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.router.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Dubbo router :ref:`configuration overview `. 
- -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.dubbo.router.v2alpha1.Router"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD deleted file mode 100644 index b6e6273d28f50..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto deleted file mode 100644 index 646f053ca9b6c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v3; - -import "envoy/extensions/filters/network/dubbo_proxy/v3/route.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; -option java_outer_classname = "DubboProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.dubbo_proxy] - -// Dubbo Protocol types supported by Envoy. 
-enum ProtocolType { - // the default protocol. - Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - -// [#next-free-field: 6] -message DubboProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy"; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; - - // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; - - // The route table for the connection manager is static and is specified in this property. - repeated RouteConfiguration route_config = 4; - - // A list of individual Dubbo filters that make up the filter chain for requests made to the - // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no dubbo_filters are specified, a default Dubbo router filter - // (`envoy.filters.dubbo.router`) is used. - repeated DubboFilter dubbo_filters = 5; -} - -// DubboFilter configures a Dubbo filter. -message DubboFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter"; - - // The name of the filter to instantiate. The name must match a supported - // filter. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any config = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto deleted file mode 100644 index e255985ed8e46..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v3; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dubbo Proxy Route Configuration] -// Dubbo Proxy :ref:`configuration overview `. - -// [#next-free-field: 6] -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The interface name of the service. Wildcard interface are supported in the suffix or prefix form. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add``, ``com.prod.methods.add``, etc. - // ``com.dev.methods.*`` will match ``com.dev.methods.add``, ``com.dev.methods.update``, etc. - // Special wildcard ``*`` matching any interface. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add`` but not ``.methods.add``. - string interface = 2; - - // Which group does the interface belong to. 
- string group = 3; - - // The version number of the interface. - string version = 4; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 5; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteMatch"; - - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v3.HeaderMatcher headers = 2; -} - -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteAction"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // Currently ClusterWeight only supports the name and weight fields. 
- config.route.v3.WeightedCluster weighted_clusters = 2; - } -} - -message MethodMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch"; - - // The parameter matching type. - message ParameterMatchSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch.ParameterMatchSpecifier"; - - oneof parameter_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 3; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting - // of an optional plus or minus sign followed by a sequence of digits. The rule will not match - // if the header value does not represent an integer. Match will fail for empty values, - // floating point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, "-1somestring" - type.v3.Int64Range range_match = 4; - } - } - - // The name of the method. - type.matcher.v3.StringMatcher name = 1; - - // Method parameter definition. - // The key is the parameter index, starting from 0. - // The value is the parameter matching type. - map params_match = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto b/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto deleted file mode 100644 index 077d87259b6b2..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.echo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.echo.v3"; -option java_outer_classname = "EchoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Echo] -// Echo :ref:`configuration overview `. -// [#extension: envoy.filters.network.echo] - -message Echo { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.echo.v2.Echo"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD deleted file mode 100644 index 3f3a5395d2aa7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto deleted file mode 100644 index c40adb5f26bd8..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ext_authz.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/type/matcher/v3/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Network External Authorization ] -// The network layer external authorization service configuration -// :ref:`configuration overview `. -// [#extension: envoy.filters.network.ext_authz] - -// External Authorization filter calls out to an external service over the -// gRPC Authorization API defined by -// :ref:`CheckRequest `. -// A failed check will cause this filter to close the TCP connection. -// [#next-free-field: 8] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; - - // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The external authorization gRPC service configuration. 
- // The default timeout is set to 200ms by this filter. - config.core.v3.GrpcService grpc_service = 2; - - // The filter's behaviour in case the external authorization service does - // not respond back. When it is set to true, Envoy will also allow traffic in case of - // communication failure between authorization service and the proxy. - // Defaults to false. - bool failure_mode_allow = 3; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 4; - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of Check{Request,Response} used on the wire. - config.core.v3.ApiVersion transport_api_version = 5 - [(validate.rules).enum = {defined_only: true}]; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD deleted file mode 100644 index 456f4e9e61702..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/http_connection_manager/v2:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/config/trace/v3:pkg", - "//envoy/type/http/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto deleted file mode 100644 index b5544eaa93b7c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ /dev/null @@ -1,1018 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.http_connection_manager.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/protocol.proto"; -import "envoy/config/core/v3/substitution_format_string.proto"; -import "envoy/config/route/v3/route.proto"; -import "envoy/config/route/v3/scoped_route.proto"; -import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/type/http/v3/path_transformation.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; 
-import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3"; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP connection manager] -// HTTP connection manager :ref:`configuration overview `. -// [#extension: envoy.filters.network.http_connection_manager] - -// [#next-free-field: 49] -message HttpConnectionManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; - - enum CodecType { - // For every new connection, the connection manager will determine which - // codec to use. This mode supports both ALPN for TLS listeners as well as - // protocol inference for plaintext listeners. If ALPN data is available, it - // is preferred, otherwise protocol inference is used. In almost all cases, - // this is the right option to choose for this setting. - AUTO = 0; - - // The connection manager will assume that the client is speaking HTTP/1.1. - HTTP1 = 1; - - // The connection manager will assume that the client is speaking HTTP/2 - // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - // Prior knowledge is allowed). - HTTP2 = 2; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 3; - } - - enum ServerHeaderTransformation { - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. 
- APPEND_IF_ABSENT = 1; - - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - } - - // Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. - // This operation occurs before URL normalization and the merge slashes transformations if they were enabled. - enum PathWithEscapedSlashesAction { - // Default behavior specific to implementation (i.e. Envoy) of this configuration option. - // Envoy, by default, takes the KEEP_UNCHANGED action. - // NOTE: the implementation may change the default behavior at-will. - IMPLEMENTATION_SPECIFIC_DEFAULT = 0; - - // Keep escaped slashes. - KEEP_UNCHANGED = 1; - - // Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. - // The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. - REJECT_REQUEST = 2; - - // Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. - // Redirect occurs after path normalization and merge slashes transformations if they were configured. 
- // NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. - // This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to - // traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. - // The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each - // redirected request. - UNESCAPE_AND_REDIRECT = 3; - - // Unescape %2F and %5C sequences. - // Note: this option should not be enabled if intermediaries perform path based access control as - // it may lead to path confusion vulnerabilities. - UNESCAPE_AND_FORWARD = 4; - } - - // [#next-free-field: 10] - message Tracing { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing"; - - enum OperationName { - // The HTTP listener is used for ingress/incoming requests. - INGRESS = 0; - - // The HTTP listener is used for egress/outgoing requests. - EGRESS = 1; - } - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.Percent client_sampling = 3; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent random_sampling = 4; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). 
This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent overall_sampling = 5; - - // Whether to annotate spans with additional data. If true, spans will include logs for stream - // events. - bool verbose = 6; - - // Maximum length of the request path to extract and include in the HttpUrl tag. Used to - // truncate lengthy request paths to meet the needs of a tracing backend. - // Default: 256 - google.protobuf.UInt32Value max_path_tag_length = 7; - - // A list of custom tags with unique tag name to create tags for the active span. - repeated type.tracing.v3.CustomTag custom_tags = 8; - - // Configuration for an external tracing provider. - // If not specified, no tracing will be performed. - // - // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once - // in Envoy lifetime. - // Any attempts to reconfigure it or to use different configurations for different HCM filters - // will be rejected. - // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - // on OpenCensus side. 
- config.trace.v3.Tracing.Http provider = 9; - - OperationName hidden_envoy_deprecated_operation_name = 1 [ - deprecated = true, - (validate.rules).enum = {defined_only: true}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - repeated string hidden_envoy_deprecated_request_headers_for_tags = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - message InternalAddressConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." - "InternalAddressConfig"; - - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - - // [#next-free-field: 7] - message SetCurrentClientCertDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." - "SetCurrentClientCertDetails"; - - reserved 2; - - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - } - - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. 
warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." - "UpgradeConfig"; - - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. - string upgrade_type = 1; - - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - } - - // [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied - // before any processing of requests by HTTP filters, routing, and matching. Only the normalized - // path will be visible internally if a transformation is enabled. Any path rewrites that the - // router performs (e.g. :ref:`regex_rewrite - // ` or :ref:`prefix_rewrite - // `) will apply to the *:path* header - // destined for the upstream. - // - // Note: access logging and tracing will show the original *:path* header. - message PathNormalizationOptions { - // [#not-implemented-hide:] Normalization applies internally before any processing of requests by - // HTTP filters, routing, and matching *and* will affect the forwarded *:path* header. 
Defaults - // to :ref:`NormalizePathRFC3986 - // `. When not - // specified, this value may be overridden by the runtime variable - // :ref:`http_connection_manager.normalize_path`. - // Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation forwarding_transformation = 1; - - // [#not-implemented-hide:] Normalization only applies internally before any processing of - // requests by HTTP filters, routing, and matching. These will be applied after full - // transformation is applied. The *:path* header before this transformation will be restored in - // the router filter and sent upstream unless it was mutated by a filter. Defaults to no - // transformations. - // Multiple actions can be applied in the same Transformation, forming a sequential - // pipeline. The transformations will be performed in the order that they appear. Envoy will - // respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation http_filter_transformation = 2; - } - - reserved 27; - - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; - - // The route table for the connection manager is static and is specified in this property. 
- config.route.v3.RouteConfiguration route_config = 4; - - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } - - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. :ref:`Order matters ` - // as the filters are processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; - - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; - - // Presence of the object defines whether the connection manager - // emits :ref:`tracing ` data to the :ref:`configured tracing provider - // `. - Tracing tracing = 7; - - // Additional settings for HTTP requests handled by the connection manager. These will be - // applicable to both HTTP1 and HTTP2 requests. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - config.core.v3.Http1ProtocolOptions http_protocol_options = 8; - - // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. - // [#not-implemented-hide:] - config.core.v3.Http3ProtocolOptions http3_protocol_options = 44; - - // An optional override that the connection manager will write to the server - // header in responses. 
If not set, the default is *envoy*. - string server_name = 10 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Defines the action to be applied to the Server header on the response path. - // By default, Envoy will overwrite the header with the value specified in - // server_name. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; - - // Allows for explicit transformation of the :scheme header on the request path. - // If not set, Envoy's default :ref:`scheme ` - // handling applies. - config.core.v3.SchemeHeaderTransformation scheme_header_transformation = 48; - - // The maximum request headers size for incoming connections. - // If unconfigured, the default max request headers allowed is 60 KiB. - // Requests that exceed this limit will receive a 431 response. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - - // The stream idle timeout for connections managed by the connection manager. - // If not specified, this defaults to 5 minutes. The default value was selected - // so as not to interfere with any smaller configured timeouts that may have - // existed in configurations prior to the introduction of this feature, while - // introducing robustness to TCP connections that terminate without a FIN. - // - // This idle timeout applies to new streams and is overridable by the - // :ref:`route-level idle_timeout - // `. Even on a stream in - // which the override applies, prior to receipt of the initial request - // headers, the :ref:`stream_idle_timeout - // ` - // applies. Each time an encode/decode event for headers or data is processed - // for the stream, the timer will be reset. If the timeout fires, the stream - // is terminated with a 408 Request Timeout error code if no upstream response - // header has been received, otherwise a stream reset occurs. 
- // - // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough - // window to write any remaining stream data once the entirety of stream data (local end stream is - // true) has been buffered pending available window. In other words, this timeout defends against - // a peer that does not release enough window to completely write the stream, even though all - // data has been proxied within available flow control windows. If the timeout is hit in this - // case, the :ref:`tx_flush_timeout ` counter will be - // incremented. Note that :ref:`max_stream_duration - // ` does not apply to - // this corner case. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - // - // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - // to the granularity of events presented to the connection manager. For example, while receiving - // very large request headers, it may be the case that there is traffic regularly arriving on the - // wire while the connection manage is only able to observe the end-of-headers event, hence the - // stream may still idle timeout. - // - // A value of 0 will completely disable the connection manager stream idle - // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the entire request to be received. - // The timer is activated when the request is initiated, and is disarmed when the last byte of the - // request is sent upstream (i.e. all decoding filters have processed the request), OR when the - // response is initiated. If not specified or set to 0, this timeout is disabled. 
- google.protobuf.Duration request_timeout = 28 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the request headers to be received. The timer is - // activated when the first byte of the headers is received, and is disarmed when the last byte of - // the headers has been received. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_headers_timeout = 41 [ - (validate.rules).duration = {gte {}}, - (udpa.annotations.security).configure_for_untrusted_downstream = true - ]; - - // The time that Envoy will wait between sending an HTTP/2 “shutdown - // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - // This is used so that Envoy provides a grace period for new streams that - // race with the final GOAWAY frame. During this grace period, Envoy will - // continue to accept new streams. After the grace period, a final GOAWAY - // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - google.protobuf.Duration drain_timeout = 12; - - // The delayed close timeout is for downstream connections managed by the HTTP connection manager. - // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - // from the downstream connection) prior to Envoy closing the socket associated with that - // connection. - // NOTE: This timeout is enforced even when the socket associated with the downstream connection - // is pending a flush of the write buffer. However, any progress made writing data to the socket - // will restart the timer associated with this timeout. 
This means that the total grace period for - // a socket in this state will be - // +. - // - // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - // sequence mitigates a race condition that exists when downstream clients do not drain/process - // data in a connection's receive buffer after a remote close has been detected via a socket - // write(). This race leads to such clients failing to process the response code sent by Envoy, - // which could result in erroneous downstream processing. - // - // If the timeout triggers, Envoy will close the connection's socket. - // - // The default timeout is 1000 ms if this option is not specified. - // - // .. NOTE:: - // To be useful in avoiding the race condition described above, this timeout must be set - // to *at least* +<100ms to account for - // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - // - // .. WARNING:: - // A value of 0 will completely disable delayed close processing. When disabled, the downstream - // connection's socket will be closed immediately after the write flush is completed or will - // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; - - // Configuration for :ref:`HTTP access logs ` - // emitted by the connection manager. - repeated config.accesslog.v3.AccessLog access_log = 13; - - // If set to true, the connection manager will use the real remote address - // of the client connection when determining internal versus external origin and manipulating - // various headers. If set to false or absent, the connection manager will use the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for`, - // :ref:`config_http_conn_man_headers_x-envoy-internal`, and - // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- google.protobuf.BoolValue use_remote_address = 14 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - uint32 xff_num_trusted_hops = 19; - - // The configuration for the original IP detection extensions. - // - // When configured the extensions will be called along with the request headers - // and information about the downstream connection, such as the directly connected address. - // Each extension will then use these parameters to decide the request's effective remote address. - // If an extension fails to detect the original IP address and isn't configured to reject - // the request, the HCM will try the remaining extensions until one succeeds or rejects - // the request. If the request isn't rejected nor any extension succeeds, the HCM will - // fallback to using the remote address. - // - // .. WARNING:: - // Extensions cannot be used in conjunction with :ref:`use_remote_address - // ` - // nor :ref:`xff_num_trusted_hops - // `. - // - // [#extension-category: envoy.http.original_ip_detection] - repeated config.core.v3.TypedExtensionConfig original_ip_detection_extensions = 46; - - // Configures what network addresses are considered internal for stats and header sanitation - // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information about internal/external addresses. 
- InternalAddressConfig internal_address_config = 25; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - // has mutated the request headers. While :ref:`use_remote_address - // ` - // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used - // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; - - // Via header value to append to request and response headers. If this is - // empty, no via header will be appended. - string via = 22 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Whether the connection manager will generate the :ref:`x-request-id - // ` header if it does not exist. This defaults to - // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature - // is not desired it can be disabled. - google.protobuf.BoolValue generate_request_id = 15; - - // Whether the connection manager will keep the :ref:`x-request-id - // ` header if passed for a request that is edge - // (Edge request is the request from external clients to front Envoy) and not reset it, which - // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; - - // If set, Envoy will always set :ref:`x-request-id ` header in response. - // If this is false or not set, the request ID is returned in responses only if tracing is forced using - // :ref:`x-envoy-force-trace ` header. - bool always_set_request_id_in_response = 37; - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. 
- ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; - - // This field is valid only when :ref:`forward_client_cert_details - // ` - // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name - // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; - - // If proxy_100_continue is true, Envoy will proxy incoming "Expect: - // 100-continue" headers upstream, and forward "100 Continue" responses - // downstream. If this is false or not set, Envoy will instead strip the - // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; - - // If - // :ref:`use_remote_address - // ` - // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - // This is useful for testing compatibility of upstream services that parse the header value. For - // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - // `_ for details. This will also affect the - // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - // ` for runtime - // control. - // [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - - repeated UpgradeConfig upgrade_configs = 23; - - // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header - // as well. 
For paths that fail this check, Envoy will respond with 400 to - // paths that are malformed. This defaults to false currently but will default - // true in the future. When not specified, this value may be overridden by the - // runtime variable - // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison `_ - // for details of normalization. - // Note that Envoy does not perform - // `case normalization `_ - google.protobuf.BoolValue normalize_path = 30; - - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec `_ and is provided for convenience. - bool merge_slashes = 33; - - // Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). - // The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` - // runtime variable. - // The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime - // variable can be used to apply the action to a portion of all requests. - PathWithEscapedSlashesAction path_with_escaped_slashes_action = 45; - - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. If empty, the - // :ref:`UuidRequestIdConfig ` - // default extension is used with default parameters. See the documentation for that extension - // for details on what it does. Customizing the configuration for the default extension can be - // achieved by configuring it explicitly here. For example, to disable trace reason packing, - // the following configuration can be used: - // - // .. 
validated-code-block:: yaml - // :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension - // - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig - // pack_trace_reason: false - // - // [#extension-category: envoy.request_id] - RequestIDExtension request_id_extension = 36; - - // The configuration to customize local reply returned by Envoy. It can customize status code, - // body text and response content type. If not specified, status code and text body are hard - // coded in Envoy, the response content type is plain text. - LocalReplyConfig local_reply_config = 38; - - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` - // local port. This affects the upstream host header unless the method is - // CONNECT in which case if no filter adds a port the original port will be restored before headers are - // sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_matching_host_port = 39 - [(udpa.annotations.field_migrate).oneof_promotion = "strip_port_mode"]; - - oneof strip_port_mode { - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. - // This affects the upstream host header unless the method is CONNECT in - // which case if no filter adds a port the original port will be restored before headers are sent upstream. 
- // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_any_host_port = 42; - } - - // Governs Envoy's behavior when receiving invalid HTTP from downstream. - // If this option is false (default), Envoy will err on the conservative side handling HTTP - // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. - // If this option is set to true, Envoy will be more permissive, only resetting the invalid - // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire - // request is read for HTTP/1.1) - // In general this should be true for deployments receiving trusted traffic (L2 Envoys, - // company-internal mesh) and false when receiving untrusted traffic (edge deployments). - // - // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are - // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message - // ` or the new HTTP/2 option - // :ref:`override_stream_error_on_invalid_http_message - // ` - // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging - // ` - google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; - - // [#not-implemented-hide:] Path normalization configuration. This includes - // configurations for transformations (e.g. RFC 3986 normalization or merge - // adjacent slashes) and the policy to apply them. The policy determines - // whether transformations affect the forwarded *:path* header. RFC 3986 path - // normalization is enabled by default and the default policy is that the - // normalized header will be forwarded. See :ref:`PathNormalizationOptions - // ` - // for details. 
- PathNormalizationOptions path_normalization_options = 43; - - // Determines if trailing dot of the host should be removed from host/authority header before any - // processing of request by HTTP filters or routing. - // This affects the upstream host header. - // Without setting this option, incoming requests with host `example.com.` will not match against - // route with :ref:`domains` match set to `example.com`. Defaults to `false`. - // When the incoming request contains a host/authority header that includes a port number, - // setting this option will strip a trailing dot, if present, from the host section, - // leaving the port as is (e.g. host value `example.com.:443` will be updated to `example.com:443`). - bool strip_trailing_host_dot = 47; - - google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// The configuration to customize local reply returned by Envoy. -message LocalReplyConfig { - // Configuration of list of mappers which allows to filter and change local response. - // The mappers will be checked by the specified order until one is matched. - repeated ResponseMapper mappers = 1; - - // The configuration to form response body from the :ref:`command operators ` - // and to specify response content type as one of: plain/text or application/json. - // - // Example one: "plain/text" ``body_format``. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // The following response body in "plain/text" format will be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - // Example two: "application/json" ``body_format``. 
- // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // path: "%REQ(:path)%" - // - // The following response body in "application/json" format would be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. code-block:: json - // - // { - // "status": 503, - // "message": "upstream connection error", - // "path": "/foo" - // } - // - config.core.v3.SubstitutionFormatString body_format = 2; -} - -// The configuration to filter and change local response. -// [#next-free-field: 6] -message ResponseMapper { - // Filter to determine if this mapper should apply. - config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; - - // The new response status code if specified. - google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_format`. - config.core.v3.DataSource body = 3; - - // A per mapper `body_format` to override the :ref:`body_format `. - // It will be used when this mapper is matched. - config.core.v3.SubstitutionFormatString body_format_override = 4; - - // HTTP headers to add to a local reply. This allows the response mapper to append, to add - // or to override headers of any local reply before it is sent to a downstream client. - repeated config.core.v3.HeaderValueOption headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; -} - -message Rds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.Rds"; - - // Configuration source specifier for RDS. 
- config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2; -} - -// This message is used to work around the limitations with 'oneof' and repeated fields. -message ScopedRouteConfigurationsList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRouteConfigurationsList"; - - repeated config.route.v3.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated = {min_items: 1}]; -} - -// [#next-free-field: 6] -message ScopedRoutes { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes"; - - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - // keys are matched against a set of :ref:`Key` - // objects assembled from :ref:`ScopedRouteConfiguration` - // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - // :ref:`scoped_route_configurations_list`. - // - // Upon receiving a request's headers, the Router will build a key using the algorithm specified - // by this message. This key will be used to look up the routing table (i.e., the - // :ref:`RouteConfiguration`) to use for the request. - message ScopeKeyBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder"; - - // Specifies the mechanism for constructing key fragments which are composed into scope keys. 
- message FragmentBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder." - "FragmentBuilder"; - - // Specifies how the value of a header should be extracted. - // The following example maps the structure of a header to the fields in this message. - // - // .. code:: - // - // <0> <1> <-- index - // X-Header: a=b;c=d - // | || | - // | || \----> - // | || - // | |\----> - // | | - // | \----> - // | - // \----> - // - // Each 'a=b' key-value pair constitutes an 'element' of the header field. - message HeaderValueExtractor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder." - "FragmentBuilder.HeaderValueExtractor"; - - // Specifies a header field's key value pair to match on. - message KvElement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder." - "FragmentBuilder.HeaderValueExtractor.KvElement"; - - // The separator between key and value (e.g., '=' separates 'k=v;...'). - // If an element is an empty string, the element is ignored. - // If an element contains no separator, the whole element is parsed as key and the - // fragment value is an empty string. - // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_len: 1}]; - - // The key to match on. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - - // The name of the header field to extract the value from. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. 
- // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - - oneof extract_type { - // Specifies the zero based index of the element to extract. - // Note Envoy concatenates multiple values of the same header key into a comma separated - // string, the splitting always happens after the concatenation. - uint32 index = 3; - - // Specifies the key value pair to extract the value from. - KvElement element = 4; - } - } - - oneof type { - option (validate.required) = true; - - // Specifies how a header field's value should be extracted. - HeaderValueExtractor header_value_extractor = 1; - } - } - - // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - // fragments of a :ref:`ScopedRouteConfiguration`. - // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; - - // Configuration source specifier for RDS. - // This config source is used to subscribe to RouteConfiguration resources specified in - // ScopedRouteConfiguration messages. - config.core.v3.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; - - oneof config_specifier { - option (validate.required) = true; - - // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by - // matching a key constructed from the request's attributes according to the algorithm specified - // by the - // :ref:`ScopeKeyBuilder` - // in this message. 
- ScopedRouteConfigurationsList scoped_route_configurations_list = 4; - - // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS - // API. A scope is assigned to a request by matching a key constructed from the request's - // attributes according to the algorithm specified by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRds scoped_rds = 5; - } -} - -message ScopedRds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRds"; - - // Configuration source specifier for scoped RDS. - config.core.v3.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message = {required: true}]; - - // xdstp:// resource locator for scoped RDS collection. - // [#not-implemented-hide:] - string srds_resources_locator = 2; -} - -// [#next-free-field: 7] -message HttpFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; - - reserved 3; - - // The name of the filter configuration. The name is used as a fallback to - // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. - // [#extension-category: envoy.filters.http] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with code 500. 
- // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). - // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. This works for both the default filter configuration as well - // as for filters provided via the API. - config.core.v3.ExtensionConfigSource config_discovery = 5; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // If true, clients that do not support this filter may ignore the - // filter but otherwise accept the config. - // Otherwise, clients that do not support this filter must reject the config. - // This is also same with typed per filter config. - bool is_optional = 6; -} - -message RequestIDExtension { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension"; - - // Request ID extension specific configuration. - google.protobuf.Any typed_config = 1; -} - -// [#protodoc-title: Envoy Mobile HTTP connection manager] -// HTTP connection manager for use in Envoy mobile. -// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] -message EnvoyMobileHttpConnectionManager { - // The configuration for the underlying HttpConnectionManager which will be - // instantiated for Envoy mobile. - HttpConnectionManager config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD deleted file mode 100644 index ad2fc9a9a84fd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto deleted file mode 100644 index 3ee3655b7c3c9..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.local_ratelimit.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/token_bucket.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3"; -option java_outer_classname = "LocalRateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Local rate limit] -// Local rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.local_ratelimit] - -message LocalRateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.local_rate_limit.v2alpha.LocalRateLimit"; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The token bucket configuration to use for rate limiting connections that are processed by the - // filter's filter chain. Each incoming connection processed by the filter consumes a single - // token. If the token is available, the connection will be allowed. If no tokens are available, - // the connection will be immediately closed. 
- // - // .. note:: - // In the current implementation each filter and filter chain has an independent rate limit. - // - // .. note:: - // In the current implementation the token bucket's :ref:`fill_interval - // ` must be >= 50ms to avoid too aggressive - // refills. - type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD deleted file mode 100644 index d399b876a7f43..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/common/fault/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto deleted file mode 100644 index ebdfb6f2fcc0c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.mongo_proxy.v3; - -import "envoy/extensions/filters/common/fault/v3/fault.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3"; -option java_outer_classname = "MongoProxyProto"; -option 
java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Mongo proxy] -// MongoDB :ref:`configuration overview `. -// [#extension: envoy.filters.network.mongo_proxy] - -// [#next-free-field: 6] -message MongoProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.mongo_proxy.v2.MongoProxy"; - - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The optional path to use for writing Mongo access logs. If not access log - // path is specified no access logs will be written. Note that access log is - // also gated :ref:`runtime `. - string access_log = 2; - - // Inject a fixed delay before proxying a Mongo operation. Delays are - // applied to the following MongoDB operations: Query, Insert, GetMore, - // and KillCursors. Once an active delay is in progress, all incoming - // data up until the timer event fires will be a part of the delay. - common.fault.v3.FaultDelay delay = 3; - - // Flag to specify whether :ref:`dynamic metadata - // ` should be emitted. Defaults to false. - bool emit_dynamic_metadata = 4; - - // List of commands to emit metrics for. Defaults to "delete", "insert", and "update". - // Note that metrics will not be emitted for "find" commands, since those are considered - // queries, and metrics for those are emitted under a dedicated "query" namespace. - repeated string commands = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD deleted file mode 100644 index 9276f5ab3d2dd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto deleted file mode 100644 index 2fcdda846b6af..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ratelimit.v3; - -import "envoy/config/ratelimit/v3/rls.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.ratelimit] - -// [#next-free-field: 7] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.rate_limit.v2.RateLimit"; - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_len: 1}]; - - // The rate limit descriptor list to use in the rate limit service request. 
- repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated = {min_items: 1}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD deleted file mode 100644 index fd183569e5a1e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto deleted file mode 100644 index 4d1ff296fa4aa..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rbac.v3; - -import "envoy/config/rbac/v3/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v3"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.network.rbac] - -// RBAC network filter config. -// -// Header should not be used in rules/shadow_rules in RBAC network filter as -// this information is only available in :ref:`RBAC http filter `. -// [#next-free-field: 6] -message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.rbac.v2.RBAC"; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. 
- CONTINUOUS = 1; - } - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. - config.rbac.v3.RBAC rules = 1; - - // Shadow rules are not enforced by the filter but will emit stats and logs - // and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v3.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 5; - - // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; - - // RBAC enforcement strategy. By default RBAC will be enforced only once - // when the first byte of data arrives from the downstream. When used in - // conjunction with filters that emit dynamic metadata after decoding - // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to - // CONTINUOUS to enforce RBAC policies on every message boundary. - EnforcementType enforcement_type = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD deleted file mode 100644 index cc70c42fc4eb0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/redis_proxy/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto deleted file mode 100644 index 2df7c3e3f6104..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ /dev/null @@ -1,324 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.redis_proxy.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3"; -option java_outer_classname = "RedisProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis Proxy] -// Redis Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.redis_proxy] - -// [#next-free-field: 9] -message RedisProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; - - // Redis connection pool settings. 
- // [#next-free-field: 9] - message ConnPoolSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings"; - - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. - enum ReadPolicy { - // Default mode. Read from the current primary node. - MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"]; - - // Read from the primary, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"]; - - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the primary. - PREFER_REPLICA = 3; - - // Read from any node of the cluster. A random node is selected among the primary and - // replicas, healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } - - // Per-operation timeout in milliseconds. The timer starts when the first - // command of a pipeline is written to the backend connection. Each response received from Redis - // resets the timer since it signifies that the next command is being processed by the backend. - // The only exception to this behavior is when a connection to a backend is not yet established. - // In that case, the connect timeout on the cluster will govern the timeout until the connection - // is ready. 
- google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; - - // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be - // forwarded to the same upstream. The hash key used for determining the upstream in a - // consistent hash ring configuration will be computed from the hash tagged key instead of the - // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster - // implementation `_. - // - // Examples: - // - // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream - // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream - bool enable_hashtagging = 2; - - // Accept `moved and ask redirection - // `_ errors from upstream - // redis servers, and retry commands to the specified target server. The target server does not - // need to be known to the cluster manager. If the command cannot be redirected, then the - // original error is passed downstream unchanged. By default, this support is not enabled. - bool enable_redirection = 3; - - // Maximum size of encoded request buffer before flush is triggered and encoded requests - // are sent upstream. If this is unset, the buffer flushes whenever it receives data - // and performs no batching. - // This feature makes it possible for multiple clients to send requests to Envoy and have - // them batched- for example if one is running several worker processes, each with its own - // Redis connection. There is no benefit to using this with a single downstream process. - // Recommended size (if enabled) is 1024 bytes. - uint32 max_buffer_size_before_flush = 4; - - // The encoded request buffer is flushed N milliseconds after the first request has been - // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. - // If `max_buffer_size_before_flush` is not set, this flush timer is not used. 
Otherwise, - // the timer should be set according to the number of clients, overall request rate and - // desired maximum latency for a single command. For example, if there are many requests - // being batched together at a high rate, the buffer will likely be filled before the timer - // fires. Alternatively, if the request rate is lower the buffer will not be filled as often - // before the timer fires. - // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter - // defaults to 3ms. - google.protobuf.Duration buffer_flush_timeout = 5; - - // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts - // can be created at any given time by any given worker thread (see `enable_redirection` for - // more details). If the host is unknown and a connection cannot be created due to enforcing - // this limit, then redirection will fail and the original redirection error will be passed - // downstream unchanged. This limit defaults to 100. - google.protobuf.UInt32Value max_upstream_unknown_connections = 6; - - // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. These commands are measured in microseconds. - bool enable_command_stats = 8; - - // Read policy. The default is to read from the primary. - ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; - } - - message PrefixRoutes { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes"; - - message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route"; - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. 
All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - message RequestMirrorPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route." - "RequestMirrorPolicy"; - - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified or the runtime key is not present, all requests to the target cluster - // will be mirrored. - // - // If specified, Envoy will lookup the runtime key to get the percentage of requests to the - // mirror. - config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; - - // Set this to TRUE to only mirror write commands, this is effectively replicating the - // writes in a "fire and forget" manner. - bool exclude_read_commands = 3; - } - - // String prefix that must match the beginning of the keys. Envoy will always favor the - // longest match. - string prefix = 1 [(validate.rules).string = {max_bytes: 1000}]; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string = {min_len: 1}]; - - // Indicates that the route has a request mirroring policy. - repeated RequestMirrorPolicy request_mirror_policy = 4; - } - - // List of prefix routes. - repeated Route routes = 1; - - // Indicates that prefix matching should be case insensitive. - bool case_insensitive = 2; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. 
- Route catch_all_route = 4; - - string hidden_envoy_deprecated_catch_all_cluster = 3 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // RedisFault defines faults used for fault injection. - message RedisFault { - enum RedisFaultType { - // Delays requests. This is the base fault; other faults can have delays added. - DELAY = 0; - - // Returns errors on requests. - ERROR = 1; - } - - // Fault type. - RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Percentage of requests fault applies to. - config.core.v3.RuntimeFractionalPercent fault_enabled = 2 - [(validate.rules).message = {required: true}]; - - // Delay for all faults. If not set, defaults to zero - google.protobuf.Duration delay = 3; - - // Commands fault is restricted to, if any. If not set, fault applies to all commands - // other than auth and ping (due to special handling of those commands in Envoy). - repeated string commands = 4; - } - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. This does not apply to upstream command stats currently. - bool latency_in_micros = 4; - - // List of **unique** prefixes used to separate keys from different workloads to different - // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all - // cluster can be used to forward commands when there is no match. Time complexity of the - // lookups are in O(min(longest key prefix, key length)). - // - // Example: - // - // .. 
code-block:: yaml - // - // prefix_routes: - // routes: - // - prefix: "ab" - // cluster: "cluster_a" - // - prefix: "abc" - // cluster: "cluster_b" - // - // When using the above routes, the following prefixes would be sent to: - // - // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b. - // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a. - // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all - // route` - // would have retrieved the key from that cluster instead. - // - // See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing clusters. - PrefixRoutes prefix_routes = 5; - - // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis - // AUTH command `_ with this password before enabling any other - // command. If an AUTH command's password matches this password, an "OK" response will be returned - // to the client. If the AUTH command password does not match this password, then an "ERR invalid - // password" error will be returned. If any other command is received before AUTH when this - // password is set, then a "NOAUTH Authentication required." error response will be sent to the - // client. If an AUTH command is received when the password is not set, then an "ERR Client sent - // AUTH, but no password is set" error will be returned. - config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; - - // List of faults to inject. Faults currently come in two flavors: - // - Delay, which delays a request. - // - Error, which responds to a request with an error. Errors can also have delays attached. - // - // Example: - // - // .. 
code-block:: yaml - // - // faults: - // - fault_type: ERROR - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // commands: - // - GET - // - fault_type: DELAY - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // delay: 2s - // - // See the :ref:`fault injection section - // ` for more information on how to configure this. - repeated RedisFault faults = 8; - - // If a username is provided an ACL style AUTH command will be required with a username and password. - // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis - // AUTH command `_ with this username and the *downstream_auth_password* - // before enabling any other command. If an AUTH command's username and password matches this username - // and the *downstream_auth_password* , an "OK" response will be returned to the client. If the AUTH - // command username or password does not match this username or the *downstream_auth_password*, then an - // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this - // password is set, then a "NOAUTH Authentication required." error response will be sent to the - // client. If an AUTH command is received when the password is not set, then an "ERR Client sent - // AUTH, but no ACL is set" error will be returned. - config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; - - string hidden_envoy_deprecated_cluster = 2 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.redis_proxy`. 
-message RedisProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions"; - - // Upstream server password as defined by the `requirepass` directive - // `_ in the server's configuration file. - config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; - - // Upstream server username as defined by the `user` directive - // `_ in the server's configuration file. - config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto b/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto deleted file mode 100644 index 3d6f0ee234abb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.sni_cluster.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3"; -option java_outer_classname = "SniClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SNI Cluster Filter] 
-// Set the upstream cluster name from the SNI field in the TLS connection. -// [#extension: envoy.filters.network.sni_cluster] - -message SniCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.sni_cluster.v2.SniCluster"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD deleted file mode 100644 index 05f25a2fe5d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD deleted file mode 100644 index d317ad9266de3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/tcp_proxy/v2:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto deleted file mode 100644 index f00298a3edd4e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ /dev/null @@ -1,179 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.tcp_proxy.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/hash_policy.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3"; -option java_outer_classname = "TcpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: TCP Proxy] -// TCP Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.tcp_proxy] - -// [#next-free-field: 14] -message TcpProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; - - // Allows for specification of multiple upstream clusters along with weights - // that indicate the percentage of traffic to be forwarded to each cluster. 
- // The router selects an upstream cluster based on these weights. - message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is - // determined by its weight. The sum of weights across all entries in the - // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. Note that this will be merged with what's provided in - // :ref:`TcpProxy.metadata_match - // `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - config.core.v3.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Configuration for tunneling TCP over other transports or application layers. - // Tunneling is supported over both HTTP/1.1 and HTTP/2. Upstream protocol is - // determined by the cluster configuration. - message TunnelingConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig"; - - // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_len: 1}]; - - // Use POST method instead of CONNECT method to tunnel the TCP stream. 
- // The 'protocol: bytestream' header is also NOT set for HTTP/2 to comply with the spec. - // - // The upstream proxy is expected to convert POST payload as raw TCP. - bool use_post = 2; - - // Additional request headers to upstream proxy. This is mainly used to - // trigger upstream to convert POST requests back to CONNECT requests. - // - // Neither *:-prefixed* pseudo-headers nor the Host: header can be overridden. - repeated config.core.v3.HeaderValueOption headers_to_add = 3 - [(validate.rules).repeated = {max_items: 1000}]; - } - - message DeprecatedV1 { - option deprecated = true; - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1"; - - // [#next-free-field: 6] - message TCPRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1.TCPRoute"; - - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - repeated config.core.v3.CidrRange destination_ip_list = 2; - - string destination_ports = 3; - - repeated config.core.v3.CidrRange source_ip_list = 4; - - string source_ports = 5; - } - - repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. 
- config.core.v3.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set - // to 0s, the timeout will be disabled. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - google.protobuf.Duration idle_timeout = 8; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated config.accesslog.v3.AccessLog access_log = 5; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - - // If set, this configures tunneling, e.g. configuration options to tunnel TCP payload over - // HTTP CONNECT. 
If this message is absent, the payload will be proxied upstream as per usual. - TunnelingConfig tunneling_config = 12; - - // The maximum duration of a connection. The duration is defined as the period since a connection - // was established. If not set, there is no max duration. When max_downstream_connection_duration - // is reached the connection will be closed. Duration must be at least 1ms. - google.protobuf.Duration max_downstream_connection_duration = 13 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD deleted file mode 100644 index 0bad14913d217..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto deleted file mode 100644 index 8583bbe4b468c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3; - -import "envoy/config/ratelimit/v3/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.ratelimit] - -// [#next-free-field: 6] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit"; - - // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configuration stage. Each configured rate limit filter performs a - // rate limit check using descriptors configured in the - // :ref:`envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request. 
- // Only those entries with a matching stage number are used for a given filter. If not set, the - // default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 3; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 4; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD deleted file mode 100644 index c24f669b9bbde..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto deleted file mode 100644 index 860622cb61e42..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.router.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.router.v3"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Thrift router :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.router] - -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.thrift.router.v2alpha1.Router"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD deleted file mode 100644 index cdb143507f644..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/route/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto deleted file mode 100644 index b79c9bc9619ea..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ /dev/null @@ -1,183 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Thrift Proxy Route Configuration] -// Thrift Proxy :ref:`configuration overview `. - -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. 
- repeated Route routes = 2; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch"; - - oneof match_specifier { - option (validate.required) = true; - - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. - string method_name = 1; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - string service_name = 2; - } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. 
- repeated config.route.v3.HeaderMatcher headers = 4; -} - -// [#next-free-field: 8] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration when the route configuration is loaded. - // If it disappears at runtime, the shadow request will silently be ignored. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; - } - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates a single upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 2; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. 
If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. - string cluster_header = 6 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - config.core.v3.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated config.route.v3.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. - bool strip_service_name = 5; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 7; -} - -// Allows for specification of multiple upstream clusters along with weights that indicate the -// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster -// based on these weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is determined by its - // weight. The sum of weights across all entries in the clusters array determines the total - // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field, combined with what's - // provided in :ref:`RouteAction's metadata_match - // `, - // will be considered. Values here will take precedence. Keys and values should be provided - // under the "envoy.lb" metadata key. - config.core.v3.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto deleted file mode 100644 index a03251a2ee3b0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ /dev/null @@ -1,141 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v3; - -import "envoy/extensions/filters/network/thrift_proxy/v3/route.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; -option java_outer_classname = "ThriftProxyProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Thrift Proxy] -// Thrift Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.thrift_proxy] - -// Thrift transport types supported by Envoy. -enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. - // For upstream connections, the Thrift proxy will use same transport as the downstream - // connection. - AUTO_TRANSPORT = 0; - - // The Thrift proxy will use the Thrift framed transport. - FRAMED = 1; - - // The Thrift proxy will use the Thrift unframed transport. - UNFRAMED = 2; - - // The Thrift proxy will assume the client is using the Thrift header transport. - HEADER = 3; -} - -// Thrift Protocol types supported by Envoy. -enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. - // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol - // detection. For upstream connections, the Thrift proxy will use the same protocol as the - // downstream connection. - AUTO_PROTOCOL = 0; - - // The Thrift proxy will use the Thrift binary protocol. - BINARY = 1; - - // The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2; - - // The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3; - - // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. - TWITTER = 4; -} - -// [#next-free-field: 8] -message ThriftProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy"; - - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use. 
Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - // [#extension-category: envoy.thrift_proxy.filters] - repeated ThriftFilter thrift_filters = 5; - - // If set to true, Envoy will try to skip decode data after metadata in the Thrift message. - // This mode will only work if the upstream and downstream protocols are the same and the transport - // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will - // fallback to decode the data. - bool payload_passthrough = 6; - - // Optional maximum requests for a single downstream connection. If not specified, there is no limit. - google.protobuf.UInt32Value max_requests_per_connection = 7; -} - -// ThriftFilter configures a Thrift filter. -message ThriftFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftFilter"; - - // The name of the filter to instantiate. The name must match a supported - // filter. The built-in filters are: - // - // [#comment:TODO(zuercher): Auto generate the following list] - // * :ref:`envoy.filters.thrift.router ` - // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. 
See the supported - // filters for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in -// in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.thrift_proxy`. -message ThriftProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProtocolOptions"; - - // Supplies the type of transport that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_TRANSPORT`, - // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_PROTOCOL`, - // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto deleted file mode 100644 index 1b27e18e3c314..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// [#extension: envoy.filters.network.wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. - envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto deleted file mode 100644 index eb2c202c58f1a..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.zookeeper_proxy.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3"; -option java_outer_classname = "ZookeeperProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: ZooKeeper proxy] -// ZooKeeper Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.zookeeper_proxy] - -message ZooKeeperProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy"; - - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. - // If the access log field is empty, access logs will not be written. - string access_log = 2; - - // Messages — requests, responses and events — that are bigger than this value will - // be ignored. If it is not set, the default value is 1Mb. 
- // - // The value here should match the jute.maxbuffer property in your cluster configuration: - // - // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options - // - // if that is set. If it isn't, ZooKeeper's default is also 1Mb. - google.protobuf.UInt32Value max_packet_bytes = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD deleted file mode 100644 index 1f8dbc5af5610..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/data/dns/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto deleted file mode 100644 index 39f44724c430f..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ /dev/null @@ -1,90 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.udp.dns_filter.v3alpha; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/data/dns/v3/dns_table.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - // This message contains the configuration for the DNS Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v3.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - config.core.v3.DataSource external_dns_table = 2; - } - } - - // This message contains the configuration for the DNS Filter operating - // in a client context. This message will contain the timeouts, retry, - // and forwarding configuration for Envoy to make DNS requests to other - // resolvers - // - // [#next-free-field: 6] - message ClientContextConfig { - // Sets the maximum time we will wait for the upstream query to complete - // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 1. Note that the total latency for a failed query is the - // number of retries multiplied by the resolver_timeout. - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - - // This field was used for `dns_resolution_config` in Envoy 1.19.0 and - // 1.19.1. - // Control planes that need to set this field for Envoy 1.19.0 and - // 1.19.1 clients should fork the protobufs and change the field type - // to `DnsResolutionConfig`. 
- // Control planes that need to simultaneously support Envoy 1.18.x and - // Envoy 1.19.x should avoid Envoy 1.19.0 and 1.19.1. - // - // [#not-implemented-hide:] - repeated config.core.v3.Address upstream_resolvers = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - config.core.v3.DnsResolutionConfig dns_resolution_config = 5; - - // Controls how many outstanding external lookup contexts the filter tracks. - // The context structure allows the filter to respond to every query even if the external - // resolution times out or is otherwise unsuccessful - uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration contains the data that the filter uses to respond - // to DNS requests. - ServerContextConfig server_config = 2; - - // Client context configuration controls Envoy's behavior when it must use external - // resolvers to answer a query. This object is optional and if omitted instructs - // the filter to resolve queries from the data in the server_config - ClientContextConfig client_config = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto deleted file mode 100644 index 9d410e28afe3d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.udp.udp_proxy.v3; - -import "envoy/config/core/v3/udp_socket_config.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3"; -option java_outer_classname = "UdpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UDP proxy] -// UDP proxy :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.udp_proxy] - -// Configuration for the UDP proxy filter. -// [#next-free-field: 7] -message UdpProxyConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig"; - - // Specifies the UDP hash policy. - // The packets can be routed by hash policy. - message HashPolicy { - oneof policy_specifier { - option (validate.required) = true; - - // The source IP will be used to compute the hash used by hash-based load balancing algorithms. - bool source_ip = 1 [(validate.rules).bool = {const: true}]; - - // A given key will be used to compute the hash used by hash-based load balancing algorithms. 
- // In certain cases there is a need to direct different UDP streams jointly towards the selected set of endpoints. - // A possible use-case is VoIP telephony, where media (RTP) and its corresponding control (RTCP) belong to the same logical session, - // although they travel in separate streams. To ensure that these pair of streams are load-balanced on session level - // (instead of individual stream level), dynamically created listeners can use the same hash key for each stream in the session. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - } - - // The stat prefix used when emitting UDP proxy filter stats. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2 [(validate.rules).string = {min_len: 1}]; - } - - // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by - // the session. The default if not specified is 1 minute. - google.protobuf.Duration idle_timeout = 3; - - // Use the remote downstream IP address as the sender IP address when sending packets to upstream hosts. - // This option requires Envoy to be run with the *CAP_NET_ADMIN* capability on Linux. - // And the IPv6 stack must be enabled on Linux kernel. - // This option does not preserve the remote downstream port. - // If this option is enabled, the IP address of sent datagrams will be changed to the remote downstream IP address. - // This means that Envoy will not receive packets that are sent by upstream hosts because the upstream hosts - // will send the packets with the remote downstream IP address as the destination. All packets will be routed - // to the remote downstream directly if there are route rules on the upstream host side. - // There are two options to return the packets back to the remote downstream. - // The first one is to use DSR (Direct Server Return). 
- // The other one is to configure routing rules on the upstream hosts to forward - // all packets back to Envoy and configure iptables rules on the host running Envoy to - // forward all packets from upstream hosts to the Envoy process so that Envoy can forward the packets to the downstream. - // If the platform does not support this option, Envoy will raise a configuration error. - bool use_original_src_ip = 4; - - // Optional configuration for UDP proxy hash policies. If hash_policies is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated HashPolicy hash_policies = 5 [(validate.rules).repeated = {max_items: 1}]; - - // UDP socket configuration for upstream sockets. The default for - // :ref:`prefer_gro ` is true for upstream - // sockets as the assumption is datagrams will be received from a single source. - config.core.v3.UdpSocketConfig upstream_socket_config = 6; -} diff --git a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto deleted file mode 100644 index 9b110a4893812..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.formatter.metadata.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.formatter.metadata.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Formatter extension for printing various types of metadata] -// [#extension: envoy.formatter.metadata] - -// Metadata formatter extension implements METADATA command operator that -// prints all types of metadata. The first parameter taken by METADATA operator defines -// type of metadata. The following types of metadata are supported (case sensitive): -// -// * DYNAMIC -// * CLUSTER -// * ROUTE -// -// See :ref:`here ` for more information on access log configuration. - -// %METADATA(TYPE:NAMESPACE:KEY):Z% -// :ref:`Metadata ` info, -// where TYPE is type of metadata (see above for supported types), -// NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional -// lookup up key in the namespace with the option of specifying nested keys separated by ':', -// and Z is an optional parameter denoting string truncation up to Z characters long. -// The data will be logged as a JSON string. 
For example, for the following ROUTE metadata: -// -// ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` -// -// * %METADATA(ROUTE:com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` -// * %METADATA(ROUTE:com.test.my_filter:test_key)% will log: ``foo`` -// * %METADATA(ROUTE:com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` -// * %METADATA(ROUTE:com.test.my_filter:test_object:inner_key)% will log: ``bar`` -// * %METADATA(ROUTE:com.unknown_filter)% will log: ``-`` -// * %METADATA(ROUTE:com.test.my_filter:unknown_key)% will log: ``-`` -// * %METADATA(ROUTE:com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` -// -// .. note:: -// -// For typed JSON logs, this operator renders a single value with string, numeric, or boolean type -// when the referenced key is a simple value. If the referenced key is a struct or list value, a -// JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum -// length is ignored. -// -// .. note:: -// -// METADATA(DYNAMIC:NAMESPACE:KEY):Z is equivalent to :ref:`DYNAMIC_METADATA(NAMESPACE:KEY):Z` -// METADATA(CLUSTER:NAMESPACE:KEY):Z is equivalent to :ref:`CLUSTER_METADATA(NAMASPACE:KEY):Z` - -message Metadata { -} diff --git a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD b/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto b/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto deleted file mode 100644 index e1b6c32a97e66..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.formatter.req_without_query.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.formatter.req_without_query.v3"; -option java_outer_classname = "ReqWithoutQueryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Formatter extension for printing request without query string] -// [#extension: envoy.formatter.req_without_query] - -// ReqWithoutQuery formatter extension implements REQ_WITHOUT_QUERY command operator that -// works the same way as :ref:`REQ ` except that it will -// remove the query string. It is used to avoid logging any sensitive information into -// the access log. -// See :ref:`here ` for more information on access log configuration. - -// %REQ_WITHOUT_QUERY(X?Y):Z% -// An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an -// optional parameter denoting string truncation up to Z characters long. The value is taken from -// the HTTP request header named X first and if it's not set, then request header Y is used. If -// none of the headers are present '-' symbol will be in the log. - -// Configuration for the request without query formatter. 
-message ReqWithoutQuery { -} diff --git a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD b/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD deleted file mode 100644 index 1cb4c6154f26e..0000000000000 --- a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/health_checker/redis/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto b/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto deleted file mode 100644 index 10f5c2b30b038..0000000000000 --- a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.health_checkers.redis.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.health_checkers.redis.v3"; -option java_outer_classname = "RedisProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis] -// Redis health checker :ref:`configuration overview `. -// [#extension: envoy.health_checkers.redis] - -message Redis { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.health_checker.redis.v2.Redis"; - - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; -} diff --git a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD b/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto b/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto deleted file mode 100644 index 64bdd497ecab0..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.header_formatters.preserve_case.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.header_formatters.preserve_case.v3"; -option java_outer_classname = "PreserveCaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Preserve case header formatter] -// [#extension: envoy.http.stateful_header_formatters.preserve_case] - -// Configuration for the preserve case header formatter. -// See the :ref:`header casing ` configuration guide for more -// information. 
-message PreserveCaseFormatterConfig { -} diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD b/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD deleted file mode 100644 index 9a76b7e148e03..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto b/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto deleted file mode 100644 index 5ea93d7548438..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.original_ip_detection.custom_header.v3; - -import "envoy/type/v3/http_status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.original_ip_detection.custom_header.v3"; -option java_outer_classname = "CustomHeaderProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Custom header original IP detection extension] - -// This extension allows for the original downstream remote IP to be detected -// by reading the value from a configured header name. If the value is successfully parsed -// as an IP, it'll be treated as the effective downstream remote address and seen as such -// by all filters. 
See :ref:`original_ip_detection_extensions -// ` -// for an overview of how extensions operate and what happens when an extension fails -// to detect the remote IP. -// -// [#extension: envoy.http.original_ip_detection.custom_header] -message CustomHeaderConfig { - // The header name containing the original downstream remote address, if present. - // - // Note: in the case of a multi-valued header, only the first value is tried and the rest are ignored. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: true}]; - - // If set to true, the extension could decide that the detected address should be treated as - // trusted by the HCM. If the address is considered :ref:`trusted`, - // it might be used as input to determine if the request is internal (among other things). - bool allow_extension_to_set_address_as_trusted = 2; - - // If this is set, the request will be rejected when detection fails using it as the HTTP response status. - // - // .. note:: - // If this is set to < 400 or > 511, the default status 403 will be used instead. - type.v3.HttpStatus reject_with_status = 3; -} diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD b/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto b/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto deleted file mode 100644 index 6864788f9f185..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.original_ip_detection.xff.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.original_ip_detection.xff.v3"; -option java_outer_classname = "XffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: XFF original IP detection extension] - -// This extension allows for the original downstream remote IP to be detected -// by reading the :ref:`config_http_conn_man_headers_x-forwarded-for` header. -// -// [#extension: envoy.http.original_ip_detection.xff] -message XffConfig { - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. 
- uint32 xff_num_trusted_hops = 1; -} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto deleted file mode 100644 index 90da16095fa95..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.internal_redirect.allow_listed_routes.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3"; -option java_outer_classname = "AllowListedRoutesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Allow listed routes internal redirect predicate] - -// An internal redirect predicate that accepts only explicitly allowed target routes. -// [#extension: envoy.internal_redirect_predicates.allow_listed_routes] -message AllowListedRoutesConfig { - // The list of routes that's allowed as redirect target by this predicate, - // identified by the route's :ref:`name `. - // Empty route names are not allowed. 
- repeated string allowed_route_names = 1 - [(validate.rules).repeated = {items {string {min_len: 1}}}]; -} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto deleted file mode 100644 index c8b03e07b4b66..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.internal_redirect.previous_routes.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3"; -option java_outer_classname = "PreviousRoutesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous routes internal redirect predicate] - -// An internal redirect predicate that rejects redirect targets that are pointing -// to a route that has been followed by a previous redirect from the current route. 
-// [#extension: envoy.internal_redirect_predicates.previous_routes] -message PreviousRoutesConfig { -} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto deleted file mode 100644 index e3638adb9fdb1..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.internal_redirect.safe_cross_scheme.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3"; -option java_outer_classname = "SafeCrossSchemeConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SafeCrossScheme internal redirect predicate] - -// An internal redirect predicate that checks the scheme between the -// downstream url and the redirect target url and allows a) same scheme -// redirect and b) safe cross scheme redirect, which means if the downstream -// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the -// downstream scheme is HTTP, only HTTP redirect targets are 
allowed. -// [#extension: envoy.internal_redirect_predicates.safe_cross_scheme] -message SafeCrossSchemeConfig { -} diff --git a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto deleted file mode 100644 index 0eff4feb8f941..0000000000000 --- a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.key_value.file_based.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.key_value.file_based.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: File Based Key Value Store storage plugin] - -// [#alpha:] -// [#extension: envoy.key_value.file_based] -// This is configuration to flush a key value store out to disk. -message FileBasedKeyValueStoreConfig { - // The filename to read the keys and values from, and write the keys and - // values to. - string filename = 1 [(validate.rules).string = {min_len: 1}]; - - // The interval at which the key value store should be flushed to the file. 
- google.protobuf.Duration flush_interval = 2; -} diff --git a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto b/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto deleted file mode 100644 index 6bbe86e688644..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.common_inputs.environment_variable.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.common_inputs.environment_variable.v3"; -option java_outer_classname = "InputProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Environment Variable Input] -// [#extension: envoy.matching.common_inputs.environment_variable] - -// Reads an environment variable to provide an input for matching. -message Config { - // Name of the environment variable to read from. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto b/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto deleted file mode 100644 index c44b0b89d57bd..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.input_matchers.consistent_hashing.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.consistent_hashing.v3"; -option java_outer_classname = "ConsistentHashingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Consistent Hashing Matcher] -// [#extension: envoy.matching.input_matchers.consistent_hashing] - -// The consistent hashing matchers computes a consistent hash from the input and matches if the resulting hash -// is within the configured threshold. -// More specifically, this matcher evaluates to true if hash(input, seed) % modulo >= threshold. 
-// Note that the consistency of the match result relies on the internal hash function (xxhash) remaining -// unchanged. While this is unlikely to happen intentionally, this could cause inconsistent match results -// between deployments. -message ConsistentHashing { - // The threshold the resulting hash must be over in order for this matcher to evaluate to true. - // This value must be below the configured modulo value. - // Setting this to 0 is equivalent to this matcher always matching. - uint32 threshold = 1; - - // The value to use for the modulus in the calculation. This effectively bounds the hash output, - // specifying the range of possible values. - // This value must be above the configured threshold. - uint32 modulo = 2 [(validate.rules).uint32 = {gt: 0}]; - - // Optional seed passed through the hash function. This allows using additional information when computing - // the hash value: by changing the seed value, a different partition of matching and non-matching inputs will - // be created that remains consistent for that seed value. - uint64 seed = 3; -} diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto deleted file mode 100644 index 3c7cb4eb5f19a..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.input_matchers.ip.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.ip.v3"; -option java_outer_classname = "IpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: IP matcher] -// [#extension: envoy.matching.input_matchers.ip] - -// This input matcher matches IPv4 or IPv6 addresses against a list of CIDR -// ranges. It returns true if and only if the input IP belongs to at least one -// of these CIDR ranges. Internally, it uses a Level-Compressed trie, as -// described in the paper `IP-address lookup using LC-tries -// `_ -// by S. Nilsson and G. Karlsson. For "big" lists of IPs, this matcher is more -// efficient than multiple single IP matcher, that would have a linear cost. -message Ip { - // Match if the IP belongs to any of these CIDR ranges. - repeated config.core.v3.CidrRange cidr_ranges = 1 [(validate.rules).repeated = {min_items: 1}]; - - // The human readable prefix to use when emitting statistics for the IP input - // matcher. Names in the table below are concatenated to this prefix. - // - // .. 
csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // ip_parsing_failed, Counter, Total number of IP addresses the matcher was unable to parse - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto deleted file mode 100644 index d2c747ec49fb1..0000000000000 --- a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.network.socket_interface.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; -option java_outer_classname = "DefaultSocketInterfaceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Default Socket Interface configuration] - -// Configuration for default socket interface that relies on OS dependent syscall to create -// sockets. 
-message DefaultSocketInterface { -} diff --git a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD b/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto b/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto deleted file mode 100644 index 6313f79861e84..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.quic.crypto_stream.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.quic.crypto_stream.v3"; -option java_outer_classname = "CryptoStreamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC server crypto stream config] -// [#extension: envoy.quic.crypto_stream.server.quiche] - -// Configuration for the default QUIC server crypto stream provided by QUICHE. -message CryptoServerStreamConfig { -} diff --git a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD b/generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto b/generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto deleted file mode 100644 index 1459142d40914..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.quic.proof_source.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.quic.proof_source.v3"; -option java_outer_classname = "ProofSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC proof source config] -// [#extension: envoy.quic.proof_source.filter_chain] - -// Configuration for the default QUIC proof source. -message ProofSourceConfig { -} diff --git a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD b/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD deleted file mode 100644 index facd82ce6de26..0000000000000 --- a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto b/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto deleted file mode 100644 index 76d3505cba04a..0000000000000 --- a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.rate_limit_descriptors.expr.v3; - -import "google/api/expr/v1alpha1/syntax.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.rate_limit_descriptors.expr.v3"; -option java_outer_classname = "ExprProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit descriptor expression] -// [#extension: envoy.rate_limit_descriptors.expr] - -// The following descriptor entry is appended with a value computed -// from a symbolic Common Expression Language expression. -// See :ref:`attributes ` for the set of -// available attributes. -// -// .. code-block:: cpp -// -// ("", "") -message Descriptor { - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // If set to true, Envoy skips the descriptor if the expression evaluates to an error. - // By default, the rate limit is not applied when an expression produces an error. - bool skip_if_error = 2; - - oneof expr_specifier { - // Expression in a text form, e.g. "connection.requested_server_name". - string text = 3 [(validate.rules).string = {min_len: 1}]; - - // Parsed expression in AST form. 
- google.api.expr.v1alpha1.Expr parsed = 4; - } -} diff --git a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD b/generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto b/generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto deleted file mode 100644 index 5c3f00da28d71..0000000000000 --- a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.request_id.uuid.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.request_id.uuid.v3"; -option java_outer_classname = "UuidProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UUID] -// [#extension: envoy.request_id.uuid] - -// Configuration for the default UUID request ID extension which has the following behavior: -// -// 1. Request ID is propagated using the :ref:`x-request-id -// ` header. -// -// 2. Request ID is a universally unique identifier `(UUID4) -// `_. -// -// 3. Tracing decision (sampled, forced, etc) is set in 14th nibble of the UUID. By default this will -// overwrite existing UUIDs received in the *x-request-id* header if the trace sampling decision -// is changed. The 14th nibble of the UUID4 has been chosen because it is fixed to '4' by the -// standard. 
Thus, '4' indicates a default UUID and no trace status. This nibble is swapped to: -// -// a. '9': Sampled. -// b. 'a': Force traced due to server-side override. -// c. 'b': Force traced due to client-side request ID joining. -// -// See the :ref:`x-request-id ` documentation for -// more information. -message UuidRequestIdConfig { - // Whether the implementation alters the UUID to contain the trace sampling decision as per the - // `UuidRequestIdConfig` message documentation. This defaults to true. If disabled no - // modification to the UUID will be performed. It is important to note that if disabled, - // stable sampling of traces, access logs, etc. will no longer work and only random sampling will - // be possible. - google.protobuf.BoolValue pack_trace_reason = 1; - - // Set whether to use :ref:`x-request-id` for sampling or not. - // This defaults to true. See the :ref:`context propagation ` - // overview for more information. - google.protobuf.BoolValue use_request_id_for_trace_sampling = 2; -} diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD b/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD deleted file mode 100644 index 3fb51ff1ccaa9..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto b/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto deleted file mode 100644 index 48aaa0a0268e4..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.resource_monitors.fixed_heap.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.resource_monitors.fixed_heap.v3"; -option java_outer_classname = "FixedHeapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fixed heap] -// [#extension: envoy.resource_monitors.fixed_heap] - -// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a -// fraction of currently reserved heap memory divided by a statically configured maximum -// specified in the FixedHeapConfig. 
-message FixedHeapConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig"; - - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD b/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD deleted file mode 100644 index 975b8fcbd5a32..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto b/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto deleted file mode 100644 index 643ea68651c73..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.resource_monitors.injected_resource.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.resource_monitors.injected_resource.v3"; -option java_outer_classname = "InjectedResourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Injected resource] -// [#extension: envoy.resource_monitors.injected_resource] - -// The injected resource monitor allows injecting a synthetic resource 
pressure into Envoy -// via a text file, which must contain a floating-point number in the range [0..1] representing -// the resource pressure and be updated atomically by a symbolic link swap. -// This is intended primarily for integration tests to force Envoy into an overloaded state. -message InjectedResourceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig"; - - string filename = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD deleted file mode 100644 index 0eab79b89fdaf..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto b/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto deleted file mode 100644 index 930cced837036..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.host.omit_canary_hosts.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_canary_hosts.v3"; -option java_outer_classname = "OmitCanaryHostsProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Omit Canary Hosts Predicate] -// [#extension: envoy.retry_host_predicates.omit_canary_hosts] - -message OmitCanaryHostsPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.omit_canary_hosts.v2.OmitCanaryHostsPredicate"; -} diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto b/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto deleted file mode 100644 index fb7adf4402880..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.host.omit_host_metadata.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3"; -option java_outer_classname = "OmitHostMetadataConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Omit host metadata retry predicate] - -// A retry host predicate that can 
be used to reject a host based on -// predefined metadata match criteria. -// [#extension: envoy.retry_host_predicates.omit_host_metadata] -message OmitHostMetadataConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig"; - - // Retry host predicate metadata match criteria. The hosts in - // the upstream cluster with matching metadata will be omitted while - // attempting a retry of a failed request. The metadata should be specified - // under the *envoy.lb* key. - config.core.v3.Metadata metadata_match = 1; -} diff --git a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD deleted file mode 100644 index 88d9a6e255a3a..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/retry/previous_hosts/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto b/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto deleted file mode 100644 index addce657fefed..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.host.previous_hosts.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.host.previous_hosts.v3"; -option java_outer_classname = "PreviousHostsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous Hosts Predicate] -// [#extension: envoy.retry_host_predicates.previous_hosts] - -message PreviousHostsPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.previous_hosts.v2.PreviousHostsPredicate"; -} diff --git a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto b/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto deleted file mode 100644 index b6a4bbecbae8c..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.priority.previous_priorities.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3"; -option java_outer_classname = "PreviousPrioritiesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous priorities retry selector] - -// A retry host selector that attempts to spread retries between priorities, even if certain -// priorities would not normally be attempted due to higher priorities being available. -// -// As priorities get excluded, load will be distributed amongst the remaining healthy priorities -// based on the relative health of the priorities, matching how load is distributed during regular -// host selection. For example, given priority healths of {100, 50, 50}, the original load will be -// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). If P0 is excluded, the load -// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the -// remaining to spill over to P2. 
-// -// Each priority attempted will be excluded until there are no healthy priorities left, at which -// point the list of attempted priorities will be reset, essentially starting from the beginning. -// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the -// following sequence of priorities would be selected (assuming update_frequency = 1): -// Attempt 1: P0 (P0 is 100% healthy) -// Attempt 2: P2 (P0 already attempted, P2 only healthy priority) -// Attempt 3: P0 (no healthy priorities, reset) -// Attempt 4: P2 -// -// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original -// priority load, so behavior should be identical to not using this plugin. -// -// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of -// priorities), which might incur significant overhead for clusters with many priorities. -// [#extension: envoy.retry_priorities.previous_priorities] -message PreviousPrioritiesConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.previous_priorities.PreviousPrioritiesConfig"; - - // How often the priority load should be updated based on previously attempted priorities. Useful - // to allow each priorities to receive more than one request before being excluded or to reduce - // the number of times that the priority load has to be recomputed. - // - // For example, by setting this to 2, then the first two attempts (initial attempt and first - // retry) will use the unmodified priority load. The third and fourth attempt will use priority - // load which excludes the priorities routed to with the first two attempts, and the fifth and - // sixth attempt will use the priority load excluding the priorities used for the first four - // attempts. - // - // Must be greater than 0. 
- int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD b/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto b/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto deleted file mode 100644 index 72306389bfeca..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.stat_sinks.graphite_statsd.v3; - -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.graphite_statsd.v3"; -option java_outer_classname = "GraphiteStatsdProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Graphite+Statsd] -// Stats configuration proto schema for ``envoy.stat_sinks.graphite_statsd`` sink. -// The sink emits stats with `Graphite `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. 
-// [#extension: envoy.stat_sinks.graphite_statsd] - -message GraphiteStatsdSink { - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running Graphite-compliant listener. If specified, - // statistics will be flushed to this address. - config.core.v3.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; - - // Optional max datagram size to use when sending UDP messages. By default Envoy - // will emit one metric per datagram. By specifying a max-size larger than a single - // metric, Envoy will emit multiple, new-line separated metrics. The max datagram - // size should not exceed your network's MTU. - // - // Note that this value may not be respected if smaller than a single metric. - google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto deleted file mode 100644 index 9d61eda713c78..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.stat_sinks.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// Wasm :ref:`configuration overview `. -// [#extension: envoy.stat_sinks.wasm] - -message Wasm { - // General Plugin configuration. - envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD deleted file mode 100644 index 8a8435d89897d..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/transport_socket/alts/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto deleted file mode 100644 index 93c6f9b834efa..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.alts.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.alts.v3"; -option java_outer_classname = "AltsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: ALTS] -// [#extension: envoy.transport_sockets.alts] - -// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy. -// Store the peer identity in dynamic metadata, namespace is "envoy.transport_socket.peer_information", key is "peer_identity". -// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ -message Alts { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.transport_socket.alts.v2alpha.Alts"; - - // The location of a handshaker service, this is usually 169.254.169.254:8080 - // on GCE. - string handshaker_service = 1 [(validate.rules).string = {min_len: 1}]; - - // The acceptable service accounts from peer, peers not in the list will be rejected in the - // handshake validation step. If empty, no validation will be performed. 
- repeated string peer_service_accounts = 2; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto deleted file mode 100644 index 687226574d29b..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.proxy_protocol.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/proxy_protocol.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; -option java_outer_classname = "UpstreamProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Upstream Proxy Protocol] -// [#extension: envoy.transport_sockets.upstream_proxy_protocol] - -// Configuration for PROXY protocol socket -message ProxyProtocolUpstreamTransport { - // The PROXY protocol settings - config.core.v3.ProxyProtocolConfig config = 1; - - // The underlying transport socket being wrapped. 
- config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD deleted file mode 100644 index 3ca8242f77801..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto deleted file mode 100644 index 25122b09c5972..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.quic.v3; - -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; -option java_outer_classname = "QuicTransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: quic transport] -// [#comment:#extension: envoy.transport_sockets.quic] - -// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
-message QuicDownstreamTransport { - tls.v3.DownstreamTlsContext downstream_tls_context = 1 - [(validate.rules).message = {required: true}]; -} - -// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. -message QuicUpstreamTransport { - tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto b/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto deleted file mode 100644 index 85406c1f77135..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.raw_buffer.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3"; -option java_outer_classname = "RawBufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Raw Buffer] -// [#extension: envoy.transport_sockets.raw_buffer] - -// Configuration for raw buffer transport socket. 
-message RawBuffer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.transport_socket.raw_buffer.v2.RawBuffer"; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD deleted file mode 100644 index 7ae3c01a99470..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto deleted file mode 100644 index 69254819baf7b..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.starttls.v3; - -import "envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.starttls.v3"; -option java_outer_classname = "StarttlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: StartTls] -// [#extension: envoy.transport_sockets.starttls] - -// StartTls transport socket addresses situations when a protocol starts in clear-text and -// negotiates an in-band switch to TLS. StartTls transport socket is protocol agnostic. In the -// case of downstream StartTls a network filter is required which understands protocol exchange -// and a state machine to signal to the StartTls transport socket when a switch to TLS is -// required. Similarly, upstream StartTls requires the owner of an upstream transport socket to -// manage the state machine necessary to properly coordinate negotiation with the upstream and -// signal to the transport socket when a switch to secure transport is required. 
- -// Configuration for a downstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message StartTlsConfig { - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for a downstream TLS socket. - transport_sockets.tls.v3.DownstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} - -// Configuration for an upstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message UpstreamStartTlsConfig { - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for an upstream TLS socket. - transport_sockets.tls.v3.UpstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD deleted file mode 100644 index b97db3d63736c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto deleted file mode 100644 index ef61575f67f72..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tap.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/common/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap] -// [#extension: envoy.transport_sockets.tap] - -// Configuration for tap transport socket. This wraps another transport socket, providing the -// ability to interpose and record in plain text any traffic that is surfaced to Envoy. -message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.transport_socket.tap.v2alpha.Tap"; - - // Common configuration for the tap transport socket. - common.tap.v3.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // The underlying transport socket being wrapped. 
- config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD deleted file mode 100644 index 47b9b9ae57e96..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto deleted file mode 100644 index b451d45381ca4..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; -import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; -import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "CertProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto deleted file mode 100644 index 1a86020683507..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto +++ /dev/null @@ -1,441 +0,0 @@ -syntax = "proto3"; - -package 
envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for - // servers. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). - // - // If not specified, a default list will be used. Defaults are different for server (downstream) and - // client (upstream) TLS configurations. 
- // - // In non-FIPS builds, the default server cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In non-FIPS builds, the default client cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // - // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. 
The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.PrivateKeyProvider"; - - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_len: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [ - deprecated = true, - (udpa.annotations.sensitive) = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - } -} - -// [#next-free-field: 8] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; - - // The TLS certificate chain. - // - // If *certificate_chain* is a filesystem path, a watch will be added to the - // parent directory for any file moves to support rotation. This currently - // only applies to dynamic secrets, when the *TlsCertificate* is delivered via - // SDS. - config.core.v3.DataSource certificate_chain = 1; - - // The TLS private key. - // - // If *private_key* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *TlsCertificate* is delivered via SDS. - config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // If specified, updates of file-based *certificate_chain* and *private_key* - // sources will be triggered by this watch. The certificate/key pair will be - // read together and validated for atomic read consistency (i.e. 
no - // intervening modification occurred between cert/key read, verified by file - // hash comparisons). This allows explicit control over the path watched, by - // default the parent directories of the filesystem paths in - // *certificate_chain* and *private_key* are watched if this field is not - // specified. This only applies when a *TlsCertificate* is delivered by SDS - // with references to filesystem paths. See the :ref:`SDS key rotation - // ` documentation for further details. - config.core.v3.WatchedDirectory watched_directory = 7; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // The OCSP response to be stapled with this certificate during the handshake. - // The response must be DER-encoded and may only be provided via ``filename`` or - // ``inline_bytes``. The response may pertain to only one certificate. - config.core.v3.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v3.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. 
- // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v3.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. -// The plugin instances are defined in the client's bootstrap file. -// The plugin allows certificates to be fetched/refreshed over the network asynchronously with -// respect to the TLS handshake. -// [#not-implemented-hide:] -message CertificateProviderPluginInstance { - // Provider instance name. If not present, defaults to "default". - // - // Instance names should generally be defined not in terms of the underlying provider - // implementation (e.g., "file_watcher") but rather in terms of the function of the - // certificates (e.g., "foo_deployment_identity"). - string instance_name = 1; - - // Opaque name used to specify certificate instances or types. 
For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. Not all provider instances will actually use this field, so the value - // defaults to the empty string. - string certificate_name = 2; -} - -// [#next-free-field: 14] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - reserved 5; - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. Note - // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be - // provided for all certificate authorities in that chain. Failure to do so will result in - // verification failure for both revoked and unrevoked certificates from that chain. 
- // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - // - // If *trusted_ca* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *CertificateValidationContext* is - // delivered via SDS. - // - // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. - // - // [#next-major-version: This field and watched_directory below should ideally be moved into a - // separate sub-message, since there's no point in specifying the latter field without this one.] - config.core.v3.DataSource trusted_ca = 1 - [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; - - // Certificate provider instance for fetching TLS certificates. - // - // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. - // [#not-implemented-hide:] - CertificateProviderPluginInstance ca_certificate_provider_instance = 13 - [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; - - // If specified, updates of a file-based *trusted_ca* source will be triggered - // by this watch. This allows explicit control over the path watched, by - // default the parent directory of the filesystem path in *trusted_ca* is - // watched if this field is not specified. This only applies when a - // *CertificateValidationContext* is delivered by SDS with references to - // filesystem paths. See the :ref:`SDS key rotation ` - // documentation for further details. - config.core.v3.WatchedDirectory watched_directory = 11; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. 
- // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matchers. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. Note that if a CRL is provided - // for any certificate authority in a trust chain, a CRL must be provided - // for all certificate authorities in that chain. Failure to do so will - // result in verification failure for both revoked and unrevoked certificates - // from that chain. 
- config.core.v3.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; - - // The configuration of an extension specific certificate validator. - // If specified, all validation is done by the specified validator, - // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). - // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. - // [#extension-category: envoy.tls.cert_validator] - config.core.v3.TypedExtensionConfig custom_validator_config = 12; - - repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto deleted file mode 100644 index f7c849c0334e1..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/extensions/transport_sockets/tls/v3/common.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "SecretProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// 
[#protodoc-title: Secrets configuration] - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; - - // Name by which the secret can be uniquely referred to. When both name and config are specified, - // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret - // will be loaded from static resources. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - config.core.v3.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto deleted file mode 100644 index f680207955a8c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ /dev/null @@ -1,302 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/extensions/transport_sockets/tls/v3/common.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "TlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: TLS transport socket] -// [#extension: envoy.transport_sockets.tls] -// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. 
- string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 9] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.DownstreamTlsContext"; - - enum OcspStaplePolicy { - // OCSP responses are optional. If an OCSP response is absent - // or expired, the associated certificate will be used for - // connections without an OCSP staple. - LENIENT_STAPLING = 0; - - // OCSP responses are optional. If an OCSP response is absent, - // the associated certificate will be used without an - // OCSP staple. If a response is provided but is expired, - // the associated certificate will not be used for - // subsequent connections. If no suitable certificate is found, - // the connection is rejected. - STRICT_STAPLING = 1; - - // OCSP responses are required. Configuration will fail if - // a certificate is provided without an OCSP response. If a - // response expires, the associated certificate will not be - // used connections. If no suitable certificate is found, the - // connection is rejected. - MUST_STAPLE = 2; - } - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. 
- // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - - // Config for whether to use certificates if they do not have - // an accompanying OCSP response or if the response expires at runtime. - // Defaults to LENIENT_STAPLING - OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. 
-// [#next-free-field: 15] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - - // Config for Certificate provider to get certificates. This provider should allow certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // - // DEPRECATED: This message is not currently used, but if we ever do need it, we will want to - // move it out of CommonTlsContext and into common.proto, similar to the existing - // CertificateProviderPluginInstance message. - // - // [#not-implemented-hide:] - message CertificateProvider { - // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Provider specific config. - // Note: an implementation is expected to dedup multiple instances of the same config - // to maintain a single certificate-provider instance. The sharing can happen, for - // example, among multiple clusters or between the tls_certificate and validation_context - // certificate providers of a cluster. - // This config could be supplied inline or (in future) a named xDS resource. - oneof config { - option (validate.required) = true; - - config.core.v3.TypedExtensionConfig typed_config = 2; - } - } - - // Similar to CertificateProvider above, but allows the provider instances to be configured on - // the client side instead of being sent from the control plane. - // - // DEPRECATED: This message was moved outside of CommonTlsContext - // and now lives in common.proto. - // - // [#not-implemented-hide:] - message CertificateProviderInstance { - // Provider instance name. 
This name must be defined in the client's configuration (e.g., a - // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config - // field that would be sent in the CertificateProvider message if the config was sent by the - // control plane). If not present, defaults to "default". - // - // Instance names should generally be defined not in terms of the underlying provider - // implementation (e.g., "file_watcher") but rather in terms of the function of the - // certificates (e.g., "foo_deployment_identity"). - string instance_name = 1; - - // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. Not all provider instances will actually use this field, so the value - // defaults to the empty string. - string certificate_name = 2; - } - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - - // Certificate provider for fetching CA certs. This will populate the - // *default_validation_context.trusted_ca* field. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Certificate provider instance for fetching CA certs. 
This will populate the - // *default_validation_context.trusted_ca* field. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // - // The same number and types of certificates as :ref:`tls_certificates ` - // are valid in the the certificates fetched through this setting. - // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] 
- repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 2}]; - - // Certificate provider instance for fetching TLS certs. - // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. - // [#not-implemented-hide:] - CertificateProviderPluginInstance tls_certificate_provider_instance = 14; - - // Certificate provider for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProvider tls_certificate_certificate_provider = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Certificate provider instance for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProviderInstance tls_certificate_certificate_provider_instance = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. 
- CombinedCertificateValidationContext combined_validation_context = 8; - - // Certificate provider for fetching validation context. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 10 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Certificate provider instance for fetching validation context. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; - - // Custom TLS handshaker. If empty, defaults to native TLS handshaking - // behavior. 
- config.core.v3.TypedExtensionConfig custom_handshaker = 13; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto deleted file mode 100644 index cfb5e5c07e90c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto +++ /dev/null @@ -1,59 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "TlsSpiffeValidatorConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SPIFFE Certificate Validator] -// [#extension: envoy.tls.cert_validator.spiffe] - -// Configuration specific to the `SPIFFE `_ certificate validator. -// -// Example: -// -// .. validated-code-block:: yaml -// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext -// -// custom_validator_config: -// name: envoy.tls.cert_validator.spiffe -// typed_config: -// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig -// trust_domains: -// - name: foo.com -// trust_bundle: -// filename: "foo.pem" -// - name: envoy.com -// trust_bundle: -// filename: "envoy.pem" -// -// In this example, a presented peer certificate whose SAN matches `spiffe//foo.com/**` is validated against -// the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint -// a SVID belonging to another trust domain. 
That means, in this example, a SVID signed by `envoy.com`'s CA with `spiffe//foo.com/**` -// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. -// -// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. -// -// - :ref:`allow_expired_certificate ` to allow expired certificates. -// - :ref:`match_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other SAN types. -// -message SPIFFECertValidatorConfig { - message TrustDomain { - // Name of the trust domain, `example.com`, `foo.bar.gov` for example. - // Note that this must *not* have "spiffe://" prefix. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. - config.core.v3.DataSource trust_bundle = 2; - } - - // This field specifies trust domains used for validating incoming X.509-SVID(s). - repeated TrustDomain trust_domains = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto deleted file mode 100644 index 44e207172c9b1..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.generic.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; -option java_outer_classname = "GenericConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Generic Connection Pool] - -// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream, -// based on CONNECT configuration. -// [#extension: envoy.upstreams.http.generic] -message GenericConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto deleted file mode 100644 index 8318f3c666d90..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.http.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; -option java_outer_classname = "HttpConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Http Connection Pool] - -// A connection pool which forwards downstream HTTP as HTTP to upstream. -// [#extension: envoy.upstreams.http.http] -message HttpConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto deleted file mode 100644 index 7c1d633432e9b..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.tcp.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; -option java_outer_classname = "TcpConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tcp Connection Pool] - -// A connection pool which forwards downstream HTTP as TCP to upstream, -// [#extension: envoy.upstreams.http.tcp] -message TcpConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto deleted file mode 100644 index 271dcfbe49cec..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ /dev/null @@ -1,151 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.v3; - -import "envoy/config/core/v3/protocol.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.v3"; -option java_outer_classname = "HttpProtocolOptionsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Protocol Options] -// [#extension: envoy.upstreams.http.http_protocol_options] - -// HttpProtocolOptions specifies Http upstream protocol options. This object -// is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.extensions.upstreams.http.v3.HttpProtocolOptions`. -// -// This controls what protocol(s) should be used for upstream and how said protocol(s) are configured. -// -// This replaces the prior pattern of explicit protocol configuration directly -// in the cluster. So a configuration like this, explicitly configuring the use of HTTP/2 upstream: -// -// .. code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// -// Would now look like this: -// -// .. 
code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// typed_extension_protocol_options: -// envoy.extensions.upstreams.http.v3.HttpProtocolOptions: -// "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// explicit_http_config: -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// [#next-free-field: 6] -message HttpProtocolOptions { - // If this is used, the cluster will only operate on one of the possible upstream protocols. - // Note that HTTP/2 or above should generally be used for upstream gRPC clusters. - message ExplicitHttpConfig { - oneof protocol_config { - option (validate.required) = true; - - config.core.v3.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v3.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - } - } - - // If this is used, the cluster can use either of the configured protocols, and - // will use whichever protocol was used by the downstream connection. - // - // If HTTP/3 is configured for downstream and not configured for upstream, - // HTTP/3 requests will fail over to HTTP/2. - message UseDownstreamHttpConfig { - config.core.v3.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v3.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - } - - // If this is used, the cluster can use either HTTP/1 or HTTP/2, and will use whichever - // protocol is negotiated by ALPN with the upstream. 
- // Clusters configured with *AutoHttpConfig* will use the highest available - // protocol; HTTP/2 if supported, otherwise HTTP/1. - // If the upstream does not support ALPN, *AutoHttpConfig* will fail over to HTTP/1. - // This can only be used with transport sockets which support ALPN. Using a - // transport socket which does not support ALPN will result in configuration - // failure. The transport layer may be configured with custom ALPN, but the default ALPN - // for the cluster (or if custom ALPN fails) will be "h2,http/1.1". - message AutoHttpConfig { - config.core.v3.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v3.Http2ProtocolOptions http2_protocol_options = 2; - - // Unlike HTTP/1 and HTTP/2, HTTP/3 will not be configured unless it is - // present, and (soon) only if there is an indication of server side - // support. - // See :ref:`here ` for more information on - // when HTTP/3 will be used, and when Envoy will fail over to TCP. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - // AutoHttpConfig config is undergoing especially rapid change and as it - // is alpha is not guaranteed to be API-stable. - config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - - // [#not-implemented-hide:] - // The presence of alternate protocols cache options causes the use of the - // alternate protocols cache, which is responsible for parsing and caching - // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that - // advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled. - config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4; - } - - // This contains options common across HTTP/1 and HTTP/2 - config.core.v3.HttpProtocolOptions common_http_protocol_options = 1; - - // This contains common protocol options which are only applied upstream. 
- config.core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 2; - - // This controls the actual protocol to be used upstream. - oneof upstream_protocol_options { - option (validate.required) = true; - - // To explicitly configure either HTTP/1 or HTTP/2 (but not both!) use *explicit_http_config*. - // If the *explicit_http_config* is empty, HTTP/1.1 is used. - ExplicitHttpConfig explicit_http_config = 3; - - // This allows switching on protocol based on what protocol the downstream - // connection used. - UseDownstreamHttpConfig use_downstream_protocol_config = 4; - - // This allows switching on protocol based on ALPN - AutoHttpConfig auto_config = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto deleted file mode 100644 index 5754491b91d19..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.tcp.generic.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.tcp.generic.v3"; -option java_outer_classname = "GenericConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Generic Connection Pool] - -// A connection pool which forwards downstream TCP as TCP or HTTP to upstream, -// based on CONNECT configuration. -// [#extension: envoy.upstreams.tcp.generic] -message GenericConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/wasm/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto deleted file mode 100644 index b4566c826ed08..0000000000000 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ /dev/null @@ -1,165 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.wasm.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// [#extension: envoy.bootstrap.wasm] - -// Configuration for restricting Proxy-Wasm capabilities available to modules. -message CapabilityRestrictionConfig { - // The Proxy-Wasm capabilities which will be allowed. Capabilities are mapped by - // name. The *SanitizationConfig* which each capability maps to is currently unimplemented and ignored, - // and so should be left empty. - // - // The capability names are given in the - // `Proxy-Wasm ABI `_. - // Additionally, the following WASI capabilities from - // `this list `_ - // are implemented and can be allowed: - // *fd_write*, *fd_read*, *fd_seek*, *fd_close*, *fd_fdstat_get*, *environ_get*, *environ_sizes_get*, - // *args_get*, *args_sizes_get*, *proc_exit*, *clock_time_get*, *random_get*. - map allowed_capabilities = 1; -} - -// Configuration for sanitization of inputs to an allowed capability. -// -// NOTE: This is currently unimplemented. -message SanitizationConfig { -} - -// Configuration for a Wasm VM. 
-// [#next-free-field: 8] -message VmConfig { - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null - // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same - // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can - // reduce memory utilization and make sharing of data easier which may have security implications. - // [#comment: TODO: add ref for details.] - string vm_id = 1; - - // The Wasm runtime type. - // Available Wasm runtime types are registered as extensions. The following runtimes are included - // in Envoy code base: - // - // .. _extension_envoy.wasm.runtime.null: - // - // **envoy.wasm.runtime.null**: Null sandbox, the Wasm module must be compiled and linked into the - // Envoy binary. The registered name is given in the *code* field as *inline_string*. - // - // .. _extension_envoy.wasm.runtime.v8: - // - // **envoy.wasm.runtime.v8**: `V8 `_-based WebAssembly runtime. - // - // .. _extension_envoy.wasm.runtime.wamr: - // - // **envoy.wasm.runtime.wamr**: `WAMR `_-based WebAssembly runtime. - // This runtime is not enabled in the official build. - // - // .. _extension_envoy.wasm.runtime.wavm: - // - // **envoy.wasm.runtime.wavm**: `WAVM `_-based WebAssembly runtime. - // This runtime is not enabled in the official build. - // - // .. _extension_envoy.wasm.runtime.wasmtime: - // - // **envoy.wasm.runtime.wasmtime**: `Wasmtime `_-based WebAssembly runtime. - // This runtime is not enabled in the official build. - // - // [#extension-category: envoy.wasm.runtime] - string runtime = 2 [(validate.rules).string = {min_len: 1}]; - - // The Wasm code that Envoy will execute. - config.core.v3.AsyncDataSource code = 3; - - // The Wasm configuration used in initialization of a new VM - // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before - // passing it to the plugin. 
`google.protobuf.BytesValue` and - // `google.protobuf.StringValue` are passed directly without the wrapper. - google.protobuf.Any configuration = 4; - - // Allow the wasm file to include pre-compiled code on VMs which support it. - // Warning: this should only be enable for trusted sources as the precompiled code is not - // verified. - bool allow_precompiled = 5; - - // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration - // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter - // warming state. - bool nack_on_code_cache_miss = 6; - - // Specifies environment variables to be injected to this VM which will be available through - // WASI's ``environ_get`` and ``environ_get_sizes`` system calls. Note that these functions are mostly implicitly - // called in your language's standard library, so you do not need to call them directly and you can access to env - // vars just like when you do on native platforms. - // Warning: Envoy rejects the configuration if there's conflict of key space. - EnvironmentVariables environment_variables = 7; -} - -message EnvironmentVariables { - // The keys of *Envoy's* environment variables exposed to this VM. In other words, if a key exists in Envoy's environment - // variables, then that key-value pair will be injected. Note that if a key does not exist, it will be ignored. - repeated string host_env_keys = 1; - - // Explicitly given key-value pairs to be injected to this VM in the form of "KEY=VALUE". - map key_values = 2; -} - -// Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 7] -message PluginConfig { - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *root_id* and for - // logging/debugging. 
- string name = 1; - - // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts - // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank root_id with the same *vm_id* will share Context(s). - string root_id = 2; - - // Configuration for finding or starting VM. - oneof vm { - VmConfig vm_config = 3; - // TODO: add referential VM configurations. - } - - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). - // `google.protobuf.Struct` is serialized as JSON before - // passing it to the plugin. `google.protobuf.BytesValue` and - // `google.protobuf.StringValue` are passed directly without the wrapper. - google.protobuf.Any configuration = 4; - - // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false), - // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error, - // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false - // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial - // startup the proxy will not start. - bool fail_open = 5; - - // Configuration for restricting Proxy-Wasm capabilities available to modules. - CapabilityRestrictionConfig capability_restriction_config = 6; -} - -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService -// ` This opaque configuration will be used to create a Wasm Service. -message WasmService { - // General plugin configuration. - PluginConfig config = 1; - - // If true, create a single VM rather than creating one VM per worker. Such a singleton can - // not be used with filters. 
- bool singleton = 2; -} diff --git a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/service/README.md b/generated_api_shadow/envoy/service/README.md deleted file mode 100644 index 831b740a0ea80..0000000000000 --- a/generated_api_shadow/envoy/service/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Protocol buffer definitions for gRPC and REST services. - -Visibility should be constrained to none (default). diff --git a/generated_api_shadow/envoy/service/accesslog/v2/BUILD b/generated_api_shadow/envoy/service/accesslog/v2/BUILD deleted file mode 100644 index 1253698c39d51..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/data/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/accesslog/v2/als.proto b/generated_api_shadow/envoy/service/accesslog/v2/als.proto deleted file mode 100644 index bbd871ff83a4a..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v2/als.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; - -package envoy.service.accesslog.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/accesslog/v2/accesslog.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.accesslog.v2"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Service for streaming access logs from Envoy to an access log server. -service AccessLogService { - // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different - // API for "critical" access logs in which Envoy will buffer access logs for some period of time - // until it gets an ACK so it could then retry. This API is designed for high throughput with the - // expectation that it might be lossy. - rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { - } -} - -// Empty response for the StreamAccessLogs API. Will never be sent. See below. 
-message StreamAccessLogsResponse { -} - -// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream -// access logs without ever expecting a response. -message StreamAccessLogsMessage { - message Identifier { - // The node sending the access log messages over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - - // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - // `. - string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // Wrapper for batches of HTTP access log entries. - message HTTPAccessLogEntries { - repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Wrapper for batches of TCP access log entries. - message TCPAccessLogEntries { - repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batches of log entries of a single type. Generally speaking, a given stream should only - // ever include one type of log entry. - oneof log_entries { - option (validate.required) = true; - - HTTPAccessLogEntries http_logs = 2; - - TCPAccessLogEntries tcp_logs = 3; - } -} diff --git a/generated_api_shadow/envoy/service/accesslog/v3/BUILD b/generated_api_shadow/envoy/service/accesslog/v3/BUILD deleted file mode 100644 index d44839fbe0952..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/accesslog/v3:pkg", - "//envoy/service/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/accesslog/v3/als.proto b/generated_api_shadow/envoy/service/accesslog/v3/als.proto deleted file mode 100644 index 94a290ad4a325..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v3/als.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package envoy.service.accesslog.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/accesslog/v3/accesslog.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.accesslog.v3"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Service for streaming access logs from Envoy to an access log server. -service AccessLogService { - // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different - // API for "critical" access logs in which Envoy will buffer access logs for some period of time - // until it gets an ACK so it could then retry. This API is designed for high throughput with the - // expectation that it might be lossy. - rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { - } -} - -// Empty response for the StreamAccessLogs API. 
Will never be sent. See below. -message StreamAccessLogsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsResponse"; -} - -// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream -// access logs without ever expecting a response. -message StreamAccessLogsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - - // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - // `. - string log_name = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Wrapper for batches of HTTP access log entries. - message HTTPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries"; - - repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Wrapper for batches of TCP access log entries. - message TCPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries"; - - repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batches of log entries of a single type. Generally speaking, a given stream should only - // ever include one type of log entry. 
- oneof log_entries { - option (validate.required) = true; - - HTTPAccessLogEntries http_logs = 2; - - TCPAccessLogEntries tcp_logs = 3; - } -} diff --git a/generated_api_shadow/envoy/service/auth/v2/BUILD b/generated_api_shadow/envoy/service/auth/v2/BUILD deleted file mode 100644 index fa00ca5127dea..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto deleted file mode 100644 index 8e0170067d24e..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto +++ /dev/null @@ -1,160 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v2"; -option java_outer_classname = "AttributeContextProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Attribute Context ] - -// See :ref:`network filter configuration overview ` -// and :ref:`HTTP filter configuration overview `. - -// An attribute is a piece of metadata that describes an activity on a network. -// For example, the size of an HTTP request, or the status code of an HTTP response. -// -// Each attribute has a type and a name, which is logically defined as a proto message field -// of the `AttributeContext`. 
The `AttributeContext` is a collection of individual attributes -// supported by Envoy authorization system. -// [#comment: The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers] -// [#next-free-field: 12] -message AttributeContext { - // This message defines attributes for a node that handles a network request. - // The node can be either a service or an application that sends, forwards, - // or receives the request. Service peers should fill in the `service`, - // `principal`, and `labels` as appropriate. - // [#next-free-field: 6] - message Peer { - // The address of the peer, this is typically the IP address. - // It can also be UDS path, or others. - api.v2.core.Address address = 1; - - // The canonical service name of the peer. - // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster - // ` - // If a more trusted source of the service name is available through mTLS/secure naming, it - // should be used. - string service = 2; - - // The labels associated with the peer. - // These could be pod labels for Kubernetes or tags for VMs. - // The source of the labels could be an X.509 certificate or other configuration. - map labels = 3; - - // The authenticated identity of this peer. - // For example, the identity associated with the workload such as a service account. - // If an X.509 certificate is used to assert the identity this field should be sourced from - // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. - // The primary identity should be the principal. The principal format is issuer specific. 
- // - // Example: - // * SPIFFE format is `spiffe://trust-domain/path` - // * Google account format is `https://accounts.google.com/{userid}` - string principal = 4; - - // The X.509 certificate used to authenticate the identify of this peer. - // When present, the certificate contents are encoded in URL and PEM format. - string certificate = 5; - } - - // Represents a network request, such as an HTTP request. - message Request { - // The timestamp when the proxy receives the first byte of the request. - google.protobuf.Timestamp time = 1; - - // Represents an HTTP request or an HTTP-like request. - HttpRequest http = 2; - } - - // This message defines attributes for an HTTP request. - // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 12] - message HttpRequest { - // The unique ID for a request, which can be propagated to downstream - // systems. The ID should have low probability of collision - // within a single day for a specific service. - // For HTTP requests, it should be X-Request-ID or equivalent. - string id = 1; - - // The HTTP request method, such as `GET`, `POST`. - string method = 2; - - // The HTTP request headers. If multiple headers share the same key, they - // must be merged according to the HTTP spec. All header keys must be - // lower-cased, because HTTP header keys are case-insensitive. - map headers = 3; - - // The request target, as it appears in the first line of the HTTP request. This includes - // the URL path and query-string. No decoding is performed. - string path = 4; - - // The HTTP request `Host` or 'Authority` header value. - string host = 5; - - // The HTTP URL scheme, such as `http` and `https`. This is set for HTTP/2 - // requests only. For HTTP/1.1, use "x-forwarded-for" header value to lookup - // the scheme of the request. - string scheme = 6; - - // This field is always empty, and exists for compatibility reasons. The HTTP URL query is - // included in `path` field. 
- string query = 7; - - // This field is always empty, and exists for compatibility reasons. The URL fragment is - // not submitted as part of HTTP requests; it is unknowable. - string fragment = 8; - - // The HTTP request size in bytes. If unknown, it must be -1. - int64 size = 9; - - // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". - // - // See :repo:`headers.h:ProtocolStrings ` for a list of all - // possible values. - string protocol = 10; - - // The HTTP request body. - string body = 11; - } - - // The source of a network activity, such as starting a TCP connection. - // In a multi hop network activity, the source represents the sender of the - // last hop. - Peer source = 1; - - // The destination of a network activity, such as accepting a TCP connection. - // In a multi hop network activity, the destination represents the receiver of - // the last hop. - Peer destination = 2; - - // Represents a network request, such as an HTTP request. - Request request = 4; - - // This is analogous to http_request.headers, however these contents will not be sent to the - // upstream server. Context_extensions provide an extension mechanism for sending additional - // information to the auth server without modifying the proto definition. It maps to the - // internal opaque context in the filter chain. - map context_extensions = 10; - - // Dynamic metadata associated with the request. 
- api.v2.core.Metadata metadata_context = 11; -} diff --git a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto b/generated_api_shadow/envoy/service/auth/v2/external_auth.proto deleted file mode 100644 index 7dbfd35569681..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/service/auth/v2/attribute_context.proto"; -import "envoy/type/http_status.proto"; - -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v2"; -option java_outer_classname = "ExternalAuthProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse) { - } -} - -message CheckRequest { - // The request attributes. - AttributeContext attributes = 1; -} - -// HTTP attributes for a denied response. -message DeniedHttpResponse { - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client. 
Note that the `append` field in `HeaderValueOption` defaults to - // false when used in this message. - repeated api.v2.core.HeaderValueOption headers = 2; - - // This field allows the authorization service to send a response body data - // to the downstream client. - string body = 3; -} - -// HTTP attributes for an ok response. -message OkHttpResponse { - // HTTP entity headers in addition to the original request headers. This allows the authorization - // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to - // false when used in this message. By setting the `append` field to `true`, - // the filter will append the correspondent header value to the matched request header. - // By leaving `append` as false, the filter will either add a new header, or override an existing - // one if there is a match. - repeated api.v2.core.HeaderValueOption headers = 2; -} - -// Intended for gRPC and Network Authorization servers `only`. -message CheckResponse { - // Status `OK` allows the request. Any other status indicates the request should be denied. - google.rpc.Status status = 1; - - // An message that contains HTTP response attributes. This message is - // used when the authorization service needs to send custom responses to the - // downstream client or, to modify/add request headers being dispatched to the upstream. - oneof http_response { - // Supplies http attributes for a denied response. - DeniedHttpResponse denied_response = 2; - - // Supplies http attributes for an ok response. 
- OkHttpResponse ok_response = 3; - } -} diff --git a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD b/generated_api_shadow/envoy/service/auth/v2alpha/BUILD deleted file mode 100644 index e72b2a63b2e17..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD +++ /dev/null @@ -1,10 +0,0 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -api_proto_package( - has_services = True, - deps = ["//envoy/service/auth/v2:pkg"], -) diff --git a/generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto b/generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto deleted file mode 100644 index 85e9c12c6afb4..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v2alpha; - -option java_multiple_files = true; -option java_generic_services = true; -option java_outer_classname = "CertsProto"; -option java_package = "io.envoyproxy.envoy.service.auth.v2alpha"; - -import "envoy/service/auth/v2/external_auth.proto"; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. 
- rpc Check(v2.CheckRequest) returns (v2.CheckResponse); -} diff --git a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto deleted file mode 100644 index 452a1e1ad9a5f..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto +++ /dev/null @@ -1,177 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v3"; -option java_outer_classname = "AttributeContextProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Attribute Context ] - -// See :ref:`network filter configuration overview ` -// and :ref:`HTTP filter configuration overview `. - -// An attribute is a piece of metadata that describes an activity on a network. -// For example, the size of an HTTP request, or the status code of an HTTP response. -// -// Each attribute has a type and a name, which is logically defined as a proto message field -// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes -// supported by Envoy authorization system. 
-// [#comment: The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers] -// [#next-free-field: 12] -message AttributeContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext"; - - // This message defines attributes for a node that handles a network request. - // The node can be either a service or an application that sends, forwards, - // or receives the request. Service peers should fill in the `service`, - // `principal`, and `labels` as appropriate. - // [#next-free-field: 6] - message Peer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext.Peer"; - - // The address of the peer, this is typically the IP address. - // It can also be UDS path, or others. - config.core.v3.Address address = 1; - - // The canonical service name of the peer. - // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster - // ` - // If a more trusted source of the service name is available through mTLS/secure naming, it - // should be used. - string service = 2; - - // The labels associated with the peer. - // These could be pod labels for Kubernetes or tags for VMs. - // The source of the labels could be an X.509 certificate or other configuration. - map labels = 3; - - // The authenticated identity of this peer. - // For example, the identity associated with the workload such as a service account. 
- // If an X.509 certificate is used to assert the identity this field should be sourced from - // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. - // The primary identity should be the principal. The principal format is issuer specific. - // - // Example: - // * SPIFFE format is `spiffe://trust-domain/path` - // * Google account format is `https://accounts.google.com/{userid}` - string principal = 4; - - // The X.509 certificate used to authenticate the identify of this peer. - // When present, the certificate contents are encoded in URL and PEM format. - string certificate = 5; - } - - // Represents a network request, such as an HTTP request. - message Request { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext.Request"; - - // The timestamp when the proxy receives the first byte of the request. - google.protobuf.Timestamp time = 1; - - // Represents an HTTP request or an HTTP-like request. - HttpRequest http = 2; - } - - // This message defines attributes for an HTTP request. - // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 13] - message HttpRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext.HttpRequest"; - - // The unique ID for a request, which can be propagated to downstream - // systems. The ID should have low probability of collision - // within a single day for a specific service. - // For HTTP requests, it should be X-Request-ID or equivalent. - string id = 1; - - // The HTTP request method, such as `GET`, `POST`. - string method = 2; - - // The HTTP request headers. If multiple headers share the same key, they - // must be merged according to the HTTP spec. All header keys must be - // lower-cased, because HTTP header keys are case-insensitive. - map headers = 3; - - // The request target, as it appears in the first line of the HTTP request. 
This includes - // the URL path and query-string. No decoding is performed. - string path = 4; - - // The HTTP request `Host` or 'Authority` header value. - string host = 5; - - // The HTTP URL scheme, such as `http` and `https`. - string scheme = 6; - - // This field is always empty, and exists for compatibility reasons. The HTTP URL query is - // included in `path` field. - string query = 7; - - // This field is always empty, and exists for compatibility reasons. The URL fragment is - // not submitted as part of HTTP requests; it is unknowable. - string fragment = 8; - - // The HTTP request size in bytes. If unknown, it must be -1. - int64 size = 9; - - // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". - // - // See :repo:`headers.h:ProtocolStrings ` for a list of all - // possible values. - string protocol = 10; - - // The HTTP request body. - string body = 11; - - // The HTTP request body in bytes. This is used instead of - // :ref:`body ` when - // :ref:`pack_as_bytes ` - // is set to true. - bytes raw_body = 12; - } - - // The source of a network activity, such as starting a TCP connection. - // In a multi hop network activity, the source represents the sender of the - // last hop. - Peer source = 1; - - // The destination of a network activity, such as accepting a TCP connection. - // In a multi hop network activity, the destination represents the receiver of - // the last hop. - Peer destination = 2; - - // Represents a network request, such as an HTTP request. - Request request = 4; - - // This is analogous to http_request.headers, however these contents will not be sent to the - // upstream server. Context_extensions provide an extension mechanism for sending additional - // information to the auth server without modifying the proto definition. It maps to the - // internal opaque context in the filter chain. - map context_extensions = 10; - - // Dynamic metadata associated with the request. 
- config.core.v3.Metadata metadata_context = 11; -} diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto deleted file mode 100644 index b627fcb314751..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/service/auth/v3/attribute_context.proto"; -import "envoy/type/v3/http_status.proto"; - -import "google/protobuf/struct.proto"; -import "google/rpc/status.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v3"; -option java_outer_classname = "ExternalAuthProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse) { - } -} - -message CheckRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckRequest"; - - // The request attributes. - AttributeContext attributes = 1; -} - -// HTTP attributes for a denied response. 
-message DeniedHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.DeniedHttpResponse"; - - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. - repeated config.core.v3.HeaderValueOption headers = 2; - - // This field allows the authorization service to send a response body data - // to the downstream client. - string body = 3; -} - -// HTTP attributes for an OK response. -// [#next-free-field: 7] -message OkHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.OkHttpResponse"; - - // HTTP entity headers in addition to the original request headers. This allows the authorization - // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. By setting the `append` field to `true`, - // the filter will append the correspondent header value to the matched request header. - // By leaving `append` as false, the filter will either add a new header, or override an existing - // one if there is a match. - repeated config.core.v3.HeaderValueOption headers = 2; - - // HTTP entity headers to remove from the original request before dispatching - // it to the upstream. This allows the authorization service to act on auth - // related headers (like `Authorization`), process them, and consume them. 
- // Under this model, the upstream will either receive the request (if it's - // authorized) or not receive it (if it's not), but will not see headers - // containing authorization credentials. - // - // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as - // the header `Host`, may not be removed as that would make the request - // malformed. If mentioned in `headers_to_remove` these special headers will - // be ignored. - // - // When using the HTTP service this must instead be set by the HTTP - // authorization service as a comma separated list like so: - // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. - repeated string headers_to_remove = 5; - - // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata - // `. Until it is removed, - // setting this field overrides :ref:`CheckResponse.dynamic_metadata - // `. - google.protobuf.Struct dynamic_metadata = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client on success. Note that the :ref:`append field in HeaderValueOption ` - // defaults to false when used in this message. - repeated config.core.v3.HeaderValueOption response_headers_to_add = 6; -} - -// Intended for gRPC and Network Authorization servers `only`. -message CheckResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.CheckResponse"; - - // Status `OK` allows the request. Any other status indicates the request should be denied. - google.rpc.Status status = 1; - - // An message that contains HTTP response attributes. This message is - // used when the authorization service needs to send custom responses to the - // downstream client or, to modify/add request headers being dispatched to the upstream. - oneof http_response { - // Supplies http attributes for a denied response. 
- DeniedHttpResponse denied_response = 2; - - // Supplies http attributes for an ok response. - OkHttpResponse ok_response = 3; - } - - // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata lives in a namespace specified by the canonical name of extension filter - // that requires it: - // - // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. - // - :ref:`envoy.filters.network.ext_authz ` for network filter. - google.protobuf.Struct dynamic_metadata = 4; -} diff --git a/generated_api_shadow/envoy/service/cluster/v3/BUILD b/generated_api_shadow/envoy/service/cluster/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/cluster/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/cluster/v3/cds.proto b/generated_api_shadow/envoy/service/cluster/v3/cds.proto deleted file mode 100644 index 100ecad39a968..0000000000000 --- a/generated_api_shadow/envoy/service/cluster/v3/cds.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.service.cluster.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.cluster.v3"; -option java_outer_classname = "CdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: CDS] - -// Return list of all clusters this proxy will load balance to. -service ClusterDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.cluster.v3.Cluster"; - - rpc StreamClusters(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaClusters(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchClusters(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:clusters"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message CdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.CdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/BUILD b/generated_api_shadow/envoy/service/discovery/v2/BUILD deleted file mode 100644 index ec687f7534368..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/discovery/v2/ads.proto b/generated_api_shadow/envoy/service/discovery/v2/ads.proto deleted file mode 100644 index d70e0cdc8e149..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/ads.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/discovery.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "AdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Aggregated Discovery Service (ADS)] - -// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, -// and listeners are retained in the package `envoy.api.v2` for backwards -// compatibility with existing management servers. New development in discovery -// services should proceed in the package `envoy.service.discovery.v2`. - -// See https://github.com/lyft/envoy-api#apis for a description of the role of -// ADS and how it is intended to be used by a management server. ADS requests -// have the same structure as their singleton xDS counterparts, but can -// multiplex many resource types on a single stream. The type_url in the -// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover -// the multiplexed singleton APIs at the Envoy instance and management server. -service AggregatedDiscoveryService { - // This is a gRPC-only API. 
- rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest) - returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/hds.proto b/generated_api_shadow/envoy/service/discovery/v2/hds.proto deleted file mode 100644 index 76f91c5a456de..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/hds.proto +++ /dev/null @@ -1,138 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/api/v2/endpoint/endpoint_components.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "HdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.health.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health Discovery Service (HDS)] - -// HDS is Health Discovery Service. It compliments Envoy’s health checking -// service by designating this Envoy to be a healthchecker for a subset of hosts -// in the cluster. The status of these health checks will be reported to the -// management server, where it can be aggregated etc and redistributed back to -// Envoy through EDS. -service HealthDiscoveryService { - // 1. 
Envoy starts up and if its can_healthcheck option in the static - // bootstrap config is enabled, sends HealthCheckRequest to the management - // server. It supplies its capabilities (which protocol it can health check - // with, what zone it resides in, etc.). - // 2. In response to (1), the management server designates this Envoy as a - // healthchecker to health check a subset of all upstream hosts for a given - // cluster (for example upstream Host 1 and Host 2). It streams - // HealthCheckSpecifier messages with cluster related configuration for all - // clusters this Envoy is designated to health check. Subsequent - // HealthCheckSpecifier message will be sent on changes to: - // a. Endpoints to health checks - // b. Per cluster configuration change - // 3. Envoy creates a health probe based on the HealthCheck config and sends - // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck - // configuration Envoy waits upon the arrival of the probe response and - // looks at the content of the response to decide whether the endpoint is - // healthy or not. If a response hasn't been received within the timeout - // interval, the endpoint health status is considered TIMEOUT. - // 4. Envoy reports results back in an EndpointHealthResponse message. - // Envoy streams responses as often as the interval configured by the - // management server in HealthCheckSpecifier. - // 5. The management Server collects health statuses for all endpoints in the - // cluster (for all clusters) and uses this information to construct - // EndpointDiscoveryResponse messages. - // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load - // balances traffic to them without additional health checking. It may - // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection - // failed to a particular endpoint to account for health status propagation - // delay between HDS and EDS). - // By default, can_healthcheck is true. 
If can_healthcheck is false, Cluster - // configuration may not contain HealthCheck message. - // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above - // invariant? - // TODO(htuch): Add @amb67's diagram. - rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) - returns (stream HealthCheckSpecifier) { - } - - // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of - // request/response. Should we add an identifier to the HealthCheckSpecifier - // to bind with the response? - rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { - option (google.api.http).post = "/v2/discovery:health_check"; - option (google.api.http).body = "*"; - } -} - -// Defines supported protocols etc, so the management server can assign proper -// endpoints to healthcheck. -message Capability { - // Different Envoy instances may have different capabilities (e.g. Redis) - // and/or have ports enabled for different protocols. - enum Protocol { - HTTP = 0; - TCP = 1; - REDIS = 2; - } - - repeated Protocol health_check_protocols = 1; -} - -message HealthCheckRequest { - api.v2.core.Node node = 1; - - Capability capability = 2; -} - -message EndpointHealth { - api.v2.endpoint.Endpoint endpoint = 1; - - api.v2.core.HealthStatus health_status = 2; -} - -message EndpointHealthResponse { - repeated EndpointHealth endpoints_health = 1; -} - -message HealthCheckRequestOrEndpointHealthResponse { - oneof request_type { - HealthCheckRequest health_check_request = 1; - - EndpointHealthResponse endpoint_health_response = 2; - } -} - -message LocalityEndpoints { - api.v2.core.Locality locality = 1; - - repeated api.v2.endpoint.Endpoint endpoints = 2; -} - -// The cluster name and locality is provided to Envoy for the endpoints that it -// health checks to support statistics reporting, logging and debugging by the -// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the -// same cluster structure as that provided by EDS. -message ClusterHealthCheck { - string cluster_name = 1; - - repeated api.v2.core.HealthCheck health_checks = 2; - - repeated LocalityEndpoints locality_endpoints = 3; -} - -message HealthCheckSpecifier { - repeated ClusterHealthCheck cluster_health_checks = 1; - - // The default is 1 second. - google.protobuf.Duration interval = 2; -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/rtds.proto b/generated_api_shadow/envoy/service/discovery/v2/rtds.proto deleted file mode 100644 index 713ac277072bf..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/rtds.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "RtdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.runtime.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Runtime Discovery Service (RTDS)] -// RTDS :ref:`configuration overview ` - -// Discovery service for Runtime resources. 
-service RuntimeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.service.discovery.v2.Runtime"; - - rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:runtime"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { -} - -// RTDS resource type. This describes a layer in the runtime virtual filesystem. -message Runtime { - // Runtime resource name. This makes the Runtime a self-describing xDS - // resource. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - google.protobuf.Struct layer = 2; -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/sds.proto b/generated_api_shadow/envoy/service/discovery/v2/sds.proto deleted file mode 100644 index 4d01d475c59bc..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/sds.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "SdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.secret.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Secret Discovery Service (SDS)] - -service SecretDiscoveryService { - option 
(envoy.annotations.resource).type = "envoy.api.v2.auth.Secret"; - - rpc DeltaSecrets(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc StreamSecrets(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:secrets"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message SdsDummy { -} diff --git a/generated_api_shadow/envoy/service/discovery/v3/BUILD b/generated_api_shadow/envoy/service/discovery/v3/BUILD deleted file mode 100644 index 074bab85eb710..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/service/discovery/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/discovery/v3/ads.proto b/generated_api_shadow/envoy/service/discovery/v3/ads.proto deleted file mode 100644 index 03021559ab669..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v3/ads.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v3"; -option java_outer_classname = "AdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Aggregated Discovery Service (ADS)] - -// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, -// and listeners are retained in the package `envoy.api.v2` for backwards -// compatibility with existing management servers. New development in discovery -// services should proceed in the package `envoy.service.discovery.v2`. - -// See https://github.com/lyft/envoy-api#apis for a description of the role of -// ADS and how it is intended to be used by a management server. ADS requests -// have the same structure as their singleton xDS counterparts, but can -// multiplex many resource types on a single stream. The type_url in the -// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover -// the multiplexed singleton APIs at the Envoy instance and management server. -service AggregatedDiscoveryService { - // This is a gRPC-only API. - rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) - returns (stream DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.AdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto deleted file mode 100644 index 4a474d0fe2608..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto +++ /dev/null @@ -1,279 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v3"; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common discovery API components] - -// A DiscoveryRequest requests a set of versioned resources of the same type for -// a given Envoy node on some API. -// [#next-free-field: 7] -message DiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryRequest"; - - // The version_info provided in the request messages will be the version_info - // received with the most recent successfully processed response or empty on - // the first request. It is expected that no new request is sent after a - // response is received until the Envoy instance is ready to ACK/NACK the new - // configuration. ACK/NACK takes place by returning the new API config version - // as applied or the previous API config version respectively. Each type_url - // (see below) has an independent version associated with it. 
- string version_info = 1; - - // The node making the request. - config.core.v3.Node node = 2; - - // List of resources to subscribe to, e.g. list of cluster names or a route - // configuration name. If this is empty, all resources for the API are - // returned. LDS/CDS may have empty resource_names, which will cause all - // resources for the Envoy instance to be returned. The LDS and CDS responses - // will then imply a number of resources that need to be fetched via EDS/RDS, - // which will be explicitly enumerated in resource_names. - repeated string resource_names = 3; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - // required for ADS. - string type_url = 4; - - // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - // discussion on version_info and the DiscoveryResponse nonce comment. This - // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - // or 2) the client has not yet accepted an update in this xDS stream (unlike - // delta, where it is populated only for new explicit ACKs). - string response_nonce = 5; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy - // internal exception related to the failure. It is only intended for consumption during manual - // debugging, the string provided is not guaranteed to be stable across Envoy versions. - google.rpc.Status error_detail = 6; -} - -// [#next-free-field: 7] -message DiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryResponse"; - - // The version of the response data. - string version_info = 1; - - // The response resources. These resources are typed and depend on the API being called. 
- repeated google.protobuf.Any resources = 2; - - // [#not-implemented-hide:] - // Canary is used to support two Envoy command line flags: - // - // * --terminate-on-canary-transition-failure. When set, Envoy is able to - // terminate if it detects that configuration is stuck at canary. Consider - // this example sequence of updates: - // - Management server applies a canary config successfully. - // - Management server rolls back to a production config. - // - Envoy rejects the new production config. - // Since there is no sensible way to continue receiving configuration - // updates, Envoy will then terminate and apply production config from a - // clean slate. - // * --dry-run-canary. When set, a canary response will never be applied, only - // validated via a dry run. - bool canary = 3; - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). - string type_url = 4; - - // For gRPC based subscriptions, the nonce provides a way to explicitly ack a - // specific DiscoveryResponse in a following DiscoveryRequest. Additional - // messages may have been sent by Envoy to the management server for the - // previous version on the stream prior to this DiscoveryResponse, that were - // unprocessed at response send time. The nonce allows the management server - // to ignore any further DiscoveryRequests for the previous version until a - // DiscoveryRequest bearing the nonce. The nonce is optional and is not - // required for non-stream based xDS implementations. - string nonce = 5; - - // The control plane instance that sent the response. - config.core.v3.ControlPlane control_plane = 6; -} - -// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -// endpoint for Delta xDS. -// -// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -// snapshot of the tracked resources. 
Instead, DeltaDiscoveryResponses are a -// diff to the state of a xDS client. -// In Delta XDS there are per-resource versions, which allow tracking state at -// the resource granularity. -// An xDS Delta session is always in the context of a gRPC bidirectional -// stream. This allows the xDS server to keep track of the state of xDS clients -// connected to it. -// -// In Delta xDS the nonce field is required and used to pair -// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -// Optionally, a response message level system_version_info is present for -// debugging purposes only. -// -// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest -// can be either or both of: [1] informing the server of what resources the -// client has gained/lost interest in (using resource_names_subscribe and -// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from -// the server (using response_nonce, with presence of error_detail making it a NACK). -// Additionally, the first message (for a given type_url) of a reconnected gRPC stream -// has a third role: informing the server of the resources (and their versions) -// that the client already possesses, using the initial_resource_versions field. -// -// As with state-of-the-world, when multiple resource types are multiplexed (ADS), -// all requests/acknowledgments/updates are logically walled off by type_url: -// a Cluster ACK exists in a completely separate world from a prior Route NACK. -// In particular, initial_resource_versions being sent at the "start" of every -// gRPC stream actually entails a message for each type_url, each with its own -// initial_resource_versions. -// [#next-free-field: 8] -message DeltaDiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; - - // The node making the request. - config.core.v3.Node node = 1; - - // Type of the resource that is being requested, e.g. 
- // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if - // resources are only referenced via *xds_resource_subscribe* and - // *xds_resources_unsubscribe*. - string type_url = 2; - - // DeltaDiscoveryRequests allow the client to add or remove individual - // resources to the set of tracked resources in the context of a stream. - // All resource names in the resource_names_subscribe list are added to the - // set of tracked resources and all resource names in the resource_names_unsubscribe - // list are removed from the set of tracked resources. - // - // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or - // resource_names_unsubscribe list simply means that no resources are to be - // added or removed to the resource list. - // *Like* state-of-the-world xDS, the server must send updates for all tracked - // resources, but can also send updates for resources the client has not subscribed to. - // - // NOTE: the server must respond with all resources listed in resource_names_subscribe, - // even if it believes the client has the most recent version of them. The reason: - // the client may have dropped them, but then regained interest before it had a chance - // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. - // - // These two fields can be set in any DeltaDiscoveryRequest, including ACKs - // and initial_resource_versions. - // - // A list of Resource names to add to the list of tracked resources. - repeated string resource_names_subscribe = 3; - - // A list of Resource names to remove from the list of tracked resources. - repeated string resource_names_unsubscribe = 4; - - // Informs the server of the versions of the resources the xDS client knows of, to enable the - // client to continue the same logical xDS session even in the face of gRPC stream reconnection. 
- // It will not be populated: [1] in the very first stream of a session, since the client will - // not yet have any resources, [2] in any message after the first in a stream (for a given - // type_url), since the server will already be correctly tracking the client's state. - // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) - // The map's keys are names of xDS resources known to the xDS client. - // The map's values are opaque resource versions. - map initial_resource_versions = 5; - - // When the DeltaDiscoveryRequest is a ACK or NACK message in response - // to a previous DeltaDiscoveryResponse, the response_nonce must be the - // nonce in the DeltaDiscoveryResponse. - // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. - string response_nonce = 6; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* - // provides the Envoy internal exception related to the failure. - google.rpc.Status error_detail = 7; -} - -// [#next-free-field: 8] -message DeltaDiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.DeltaDiscoveryResponse"; - - // The version of the response data (used for debugging). - string system_version_info = 1; - - // The response resources. These are typed resources, whose types must match - // the type_url field. - repeated Resource resources = 2; - - // field id 3 IS available! - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - string type_url = 4; - - // Resources names of resources that have be deleted and to be removed from the xDS Client. - // Removed resources for missing resources can be ignored. 
- repeated string removed_resources = 6; - - // The nonce provides a way for DeltaDiscoveryRequests to uniquely - // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - string nonce = 5; - - // [#not-implemented-hide:] - // The control plane instance that sent the response. - config.core.v3.ControlPlane control_plane = 7; -} - -// [#next-free-field: 8] -message Resource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - message CacheControl { - // If true, xDS proxies may not cache this resource. - // Note that this does not apply to clients other than xDS proxies, which must cache resources - // for their own use, regardless of the value of this field. - bool do_not_cache = 1; - } - - // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; - - // The aliases are a list of other names that this resource can go by. - repeated string aliases = 4; - - // The resource level version. It allows xDS to track the state of individual - // resources. - string version = 1; - - // The resource being tracked. - google.protobuf.Any resource = 2; - - // Time-to-live value for the resource. For each resource, a timer is started. The timer is - // reset each time the resource is received with a new TTL. If the resource is received with - // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the - // configuration for the resource will be removed. - // - // The TTL can be refreshed or changed by sending a response that doesn't change the resource - // version. In this case the resource field does not need to be populated, which allows for - // light-weight "heartbeat" updates to keep a resource with a TTL alive. - // - // The TTL feature is meant to support configurations that should be removed in the event of - // a management server failure. 
For example, the feature may be used for fault injection - // testing where the fault injection should be terminated in the event that Envoy loses contact - // with the management server. - google.protobuf.Duration ttl = 6; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - CacheControl cache_control = 7; -} diff --git a/generated_api_shadow/envoy/service/endpoint/v3/BUILD b/generated_api_shadow/envoy/service/endpoint/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/endpoint/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/endpoint/v3/eds.proto b/generated_api_shadow/envoy/service/endpoint/v3/eds.proto deleted file mode 100644 index 7f560b87b79e5..0000000000000 --- a/generated_api_shadow/envoy/service/endpoint/v3/eds.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.service.endpoint.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; -option java_outer_classname = "EdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: EDS] -// Endpoint discovery :ref:`architecture overview ` - -service EndpointDiscoveryService { - option (envoy.annotations.resource).type = 
"envoy.config.endpoint.v3.ClusterLoadAssignment"; - - // The resource_names field in DiscoveryRequest specifies a list of clusters - // to subscribe to updates for. - rpc StreamEndpoints(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaEndpoints(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchEndpoints(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:endpoints"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message EdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.EdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/endpoint/v3/leds.proto b/generated_api_shadow/envoy/service/endpoint/v3/leds.proto deleted file mode 100644 index 89172f487eba0..0000000000000 --- a/generated_api_shadow/envoy/service/endpoint/v3/leds.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.service.endpoint.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; -option java_outer_classname = "LedsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#not-implemented-hide:] -// [#protodoc-title: LEDS] -// Locality-Endpoint discovery -// [#comment:TODO(adisuissa): Link to unified matching docs: -// :ref:`architecture overview`] - -service LocalityEndpointDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.endpoint.v3.LbEndpoint"; - - // 
State-of-the-World (DiscoveryRequest) and REST are not supported. - - // The resource_names_subscribe resource_names_unsubscribe fields in DeltaDiscoveryRequest - // specify a list of glob collections to subscribe to updates for. - rpc DeltaLocalityEndpoints(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message LedsDummy { -} diff --git a/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD b/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD deleted file mode 100644 index 4f58bd462f66c..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto deleted file mode 100644 index 8d07f04640caf..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.service.event_reporting.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.event_reporting.v2alpha"; -option java_outer_classname = 
"EventReportingServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.event_reporting.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Event Reporting Service] - -// [#not-implemented-hide:] -// Service for streaming different types of events from Envoy to a server. The examples of -// such events may be health check or outlier detection events. -service EventReportingService { - // Envoy will connect and send StreamEventsRequest messages forever. - // The management server may send StreamEventsResponse to configure event stream. See below. - // This API is designed for high throughput with the expectation that it might be lossy. - rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { - } -} - -// [#not-implemented-hide:] -// An events envoy sends to the management server. -message StreamEventsRequest { - message Identifier { - // The node sending the event messages over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batch of events. When the stream is already active, it will be the events occurred - // since the last message had been sent. If the server receives unknown event type, it should - // silently ignore it. - // - // The following events are supported: - // - // * :ref:`HealthCheckEvent ` - // * :ref:`OutlierDetectionEvent ` - repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// [#not-implemented-hide:] -// The management server may send envoy a StreamEventsResponse to tell which events the server -// is interested in. 
In future, with aggregated event reporting service, this message will -// contain, for example, clusters the envoy should send events for, or event types the server -// wants to process. -message StreamEventsResponse { -} diff --git a/generated_api_shadow/envoy/service/event_reporting/v3/BUILD b/generated_api_shadow/envoy/service/event_reporting/v3/BUILD deleted file mode 100644 index 7753cfeb3d6e5..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto deleted file mode 100644 index 30c161a1c5309..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package envoy.service.event_reporting.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.event_reporting.v3"; -option java_outer_classname = "EventReportingServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Event Reporting Service] - -// [#not-implemented-hide:] -// Service for streaming different types of events from Envoy to a server. 
The examples of -// such events may be health check or outlier detection events. -service EventReportingService { - // Envoy will connect and send StreamEventsRequest messages forever. - // The management server may send StreamEventsResponse to configure event stream. See below. - // This API is designed for high throughput with the expectation that it might be lossy. - rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { - } -} - -// [#not-implemented-hide:] -// An events envoy sends to the management server. -message StreamEventsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v2alpha.StreamEventsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier"; - - // The node sending the event messages over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batch of events. When the stream is already active, it will be the events occurred - // since the last message had been sent. If the server receives unknown event type, it should - // silently ignore it. - // - // The following events are supported: - // - // * :ref:`HealthCheckEvent ` - // * :ref:`OutlierDetectionEvent ` - repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// [#not-implemented-hide:] -// The management server may send envoy a StreamEventsResponse to tell which events the server -// is interested in. In future, with aggregated event reporting service, this message will -// contain, for example, clusters the envoy should send events for, or event types the server -// wants to process. 
-message StreamEventsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v2alpha.StreamEventsResponse"; -} diff --git a/generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD b/generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD deleted file mode 100644 index 4f3730e2af32e..0000000000000 --- a/generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/extension/v3/BUILD b/generated_api_shadow/envoy/service/extension/v3/BUILD deleted file mode 100644 index 9f2ae1e747c54..0000000000000 --- a/generated_api_shadow/envoy/service/extension/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto deleted file mode 100644 index cf83adbd26444..0000000000000 --- a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.service.extension.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.extension.v3"; -option java_outer_classname = "ConfigDiscoveryProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Extension Config Discovery Service (ECDS)] - -// Return extension configurations. -service ExtensionConfigDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; - - rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest) - returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:extension_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue -// with importing services: https://github.com/google/protobuf/issues/4221 and -// protoxform to upgrade the file. -message EcdsDummy { -} diff --git a/generated_api_shadow/envoy/service/health/v3/BUILD b/generated_api_shadow/envoy/service/health/v3/BUILD deleted file mode 100644 index 30ba155208b5d..0000000000000 --- a/generated_api_shadow/envoy/service/health/v3/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/discovery/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/health/v3/hds.proto b/generated_api_shadow/envoy/service/health/v3/hds.proto deleted file mode 100644 index 51266a64fa959..0000000000000 --- a/generated_api_shadow/envoy/service/health/v3/hds.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package envoy.service.health.v3; - -import "envoy/config/cluster/v3/cluster.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/health_check.proto"; -import "envoy/config/endpoint/v3/endpoint_components.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.health.v3"; -option java_outer_classname = "HdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health Discovery Service (HDS)] - -// HDS is Health 
Discovery Service. It compliments Envoy’s health checking -// service by designating this Envoy to be a healthchecker for a subset of hosts -// in the cluster. The status of these health checks will be reported to the -// management server, where it can be aggregated etc and redistributed back to -// Envoy through EDS. -service HealthDiscoveryService { - // 1. Envoy starts up and if its can_healthcheck option in the static - // bootstrap config is enabled, sends HealthCheckRequest to the management - // server. It supplies its capabilities (which protocol it can health check - // with, what zone it resides in, etc.). - // 2. In response to (1), the management server designates this Envoy as a - // healthchecker to health check a subset of all upstream hosts for a given - // cluster (for example upstream Host 1 and Host 2). It streams - // HealthCheckSpecifier messages with cluster related configuration for all - // clusters this Envoy is designated to health check. Subsequent - // HealthCheckSpecifier message will be sent on changes to: - // a. Endpoints to health checks - // b. Per cluster configuration change - // 3. Envoy creates a health probe based on the HealthCheck config and sends - // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck - // configuration Envoy waits upon the arrival of the probe response and - // looks at the content of the response to decide whether the endpoint is - // healthy or not. If a response hasn't been received within the timeout - // interval, the endpoint health status is considered TIMEOUT. - // 4. Envoy reports results back in an EndpointHealthResponse message. - // Envoy streams responses as often as the interval configured by the - // management server in HealthCheckSpecifier. - // 5. The management Server collects health statuses for all endpoints in the - // cluster (for all clusters) and uses this information to construct - // EndpointDiscoveryResponse messages. - // 6. 
Once Envoy has a list of upstream endpoints to send traffic to, it load - // balances traffic to them without additional health checking. It may - // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection - // failed to a particular endpoint to account for health status propagation - // delay between HDS and EDS). - // By default, can_healthcheck is true. If can_healthcheck is false, Cluster - // configuration may not contain HealthCheck message. - // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above - // invariant? - // TODO(htuch): Add @amb67's diagram. - rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) - returns (stream HealthCheckSpecifier) { - } - - // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of - // request/response. Should we add an identifier to the HealthCheckSpecifier - // to bind with the response? - rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { - option (google.api.http).post = "/v3/discovery:health_check"; - option (google.api.http).body = "*"; - } -} - -// Defines supported protocols etc, so the management server can assign proper -// endpoints to healthcheck. -message Capability { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.Capability"; - - // Different Envoy instances may have different capabilities (e.g. Redis) - // and/or have ports enabled for different protocols. 
- enum Protocol { - HTTP = 0; - TCP = 1; - REDIS = 2; - } - - repeated Protocol health_check_protocols = 1; -} - -message HealthCheckRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.HealthCheckRequest"; - - config.core.v3.Node node = 1; - - Capability capability = 2; -} - -message EndpointHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.EndpointHealth"; - - config.endpoint.v3.Endpoint endpoint = 1; - - config.core.v3.HealthStatus health_status = 2; -} - -// Group endpoint health by locality under each cluster. -message LocalityEndpointsHealth { - config.core.v3.Locality locality = 1; - - repeated EndpointHealth endpoints_health = 2; -} - -// The health status of endpoints in a cluster. The cluster name and locality -// should match the corresponding fields in ClusterHealthCheck message. -message ClusterEndpointsHealth { - string cluster_name = 1; - - repeated LocalityEndpointsHealth locality_endpoints_health = 2; -} - -message EndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.EndpointHealthResponse"; - - // Deprecated - Flat list of endpoint health information. - repeated EndpointHealth endpoints_health = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Organize Endpoint health information by cluster. 
- repeated ClusterEndpointsHealth cluster_endpoints_health = 2; -} - -message HealthCheckRequestOrEndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.HealthCheckRequestOrEndpointHealthResponse"; - - oneof request_type { - HealthCheckRequest health_check_request = 1; - - EndpointHealthResponse endpoint_health_response = 2; - } -} - -message LocalityEndpoints { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.LocalityEndpoints"; - - config.core.v3.Locality locality = 1; - - repeated config.endpoint.v3.Endpoint endpoints = 2; -} - -// The cluster name and locality is provided to Envoy for the endpoints that it -// health checks to support statistics reporting, logging and debugging by the -// Envoy instance (outside of HDS). For maximum usefulness, it should match the -// same cluster structure as that provided by EDS. -message ClusterHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.ClusterHealthCheck"; - - string cluster_name = 1; - - repeated config.core.v3.HealthCheck health_checks = 2; - - repeated LocalityEndpoints locality_endpoints = 3; - - // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria ` - // on connection when health checking. For more details, see - // :ref:`config.cluster.v3.Cluster.transport_socket_matches `. - repeated config.cluster.v3.Cluster.TransportSocketMatch transport_socket_matches = 4; -} - -message HealthCheckSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.HealthCheckSpecifier"; - - repeated ClusterHealthCheck cluster_health_checks = 1; - - // The default is 1 second. - google.protobuf.Duration interval = 2; -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message HdsDummy { -} diff --git a/generated_api_shadow/envoy/service/listener/v3/BUILD b/generated_api_shadow/envoy/service/listener/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/listener/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/listener/v3/lds.proto b/generated_api_shadow/envoy/service/listener/v3/lds.proto deleted file mode 100644 index 5b8c0d5207258..0000000000000 --- a/generated_api_shadow/envoy/service/listener/v3/lds.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.service.listener.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.listener.v3"; -option java_outer_classname = "LdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listener] -// Listener :ref:`configuration overview ` - -// The Envoy instance initiates an RPC at startup to discover a list of -// listeners. Updates are delivered via streaming from the LDS server and -// consist of a complete update of all listeners. 
Existing connections will be -// allowed to drain from listeners that are no longer present. -service ListenerDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.listener.v3.Listener"; - - rpc DeltaListeners(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc StreamListeners(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc FetchListeners(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:listeners"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message LdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/load_stats/v2/BUILD b/generated_api_shadow/envoy/service/load_stats/v2/BUILD deleted file mode 100644 index 1263251505f6b..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto deleted file mode 100644 index 7ab87c2dfb04f..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto +++ /dev/null @@ -1,88 +0,0 @@ -syntax = "proto3"; - -package envoy.service.load_stats.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/endpoint/load_report.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Load reporting service] - -service LoadReportingService { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. 
For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { - } -} - -// A load report Envoy sends to the management server. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -message LoadStatsRequest { - // Node identifier for Envoy instance. - api.v2.core.Node node = 1; - - // A list of load stats to report. - repeated api.v2.endpoint.ClusterStats cluster_stats = 2; -} - -// The management server sends envoy a LoadStatsResponse with all clusters it -// is interested in learning load stats about. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -message LoadStatsResponse { - // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. - repeated string clusters = 1; - - // If true, the client should send all clusters it knows about. - // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - // :ref:`client_features` field will honor this field. 
- bool send_all_clusters = 4; - - // The minimum interval of time to collect stats over. This is only a minimum for two reasons: - // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period - // of inobservability that might otherwise exists between the messages. New clusters are not - // subject to this consideration. - google.protobuf.Duration load_reporting_interval = 2; - - // Set to *true* if the management server supports endpoint granularity - // report. - bool report_endpoint_granularity = 3; -} diff --git a/generated_api_shadow/envoy/service/load_stats/v3/BUILD b/generated_api_shadow/envoy/service/load_stats/v3/BUILD deleted file mode 100644 index d69e005bae22f..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/load_stats/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto deleted file mode 100644 index 0b565ebe72368..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.service.load_stats.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/endpoint/v3/load_report.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.load_stats.v3"; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Load Reporting service (LRS)] - -// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional -// stream with a management server. Upon connecting, the management server can send a -// :ref:`LoadStatsResponse ` to a node it is -// interested in getting the load reports for. Envoy in this node will start sending -// :ref:`LoadStatsRequest `. This is done periodically -// based on the :ref:`load reporting interval ` -// For details, take a look at the :ref:`Load Reporting Service sandbox example `. - -service LoadReportingService { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. 
For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { - } -} - -// A load report Envoy sends to the management server. -message LoadStatsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v2.LoadStatsRequest"; - - // Node identifier for Envoy instance. 
- config.core.v3.Node node = 1; - - // A list of load stats to report. - repeated config.endpoint.v3.ClusterStats cluster_stats = 2; -} - -// The management server sends envoy a LoadStatsResponse with all clusters it -// is interested in learning load stats about. -message LoadStatsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v2.LoadStatsResponse"; - - // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. - repeated string clusters = 1; - - // If true, the client should send all clusters it knows about. - // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - // :ref:`client_features` field will honor this field. - bool send_all_clusters = 4; - - // The minimum interval of time to collect stats over. This is only a minimum for two reasons: - // - // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period - // of inobservability that might otherwise exists between the messages. New clusters are not - // subject to this consideration. - google.protobuf.Duration load_reporting_interval = 2; - - // Set to *true* if the management server supports endpoint granularity - // report. - bool report_endpoint_granularity = 3; -} diff --git a/generated_api_shadow/envoy/service/metrics/v2/BUILD b/generated_api_shadow/envoy/service/metrics/v2/BUILD deleted file mode 100644 index 79fc6928c032a..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@prometheus_metrics_model//:client_model", - ], -) diff --git a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto deleted file mode 100644 index 78d6e47e20ab1..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.metrics.v2; - -import "envoy/api/v2/core/base.proto"; - -import "io/prometheus/client/metrics.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.metrics.v2"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metrics service] - -// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric -// data model as a standard to represent metrics information. -service MetricsService { - // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. - rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { - } -} - -message StreamMetricsResponse { -} - -message StreamMetricsMessage { - message Identifier { - // The node sending metrics over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. 
- Identifier identifier = 1; - - // A list of metric entries - repeated io.prometheus.client.MetricFamily envoy_metrics = 2; -} diff --git a/generated_api_shadow/envoy/service/metrics/v3/BUILD b/generated_api_shadow/envoy/service/metrics/v3/BUILD deleted file mode 100644 index b266dfc5558d2..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/service/metrics/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@prometheus_metrics_model//:client_model", - ], -) diff --git a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto deleted file mode 100644 index e86bda356f7d2..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.service.metrics.v3; - -import "envoy/config/core/v3/base.proto"; - -import "io/prometheus/client/metrics.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.metrics.v3"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metrics service] - -// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric -// data model as a standard to represent metrics information. -service MetricsService { - // Envoy will connect and send StreamMetricsMessage messages forever. 
It does not expect any - // response to be sent as nothing would be done in the case of failure. - rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { - } -} - -message StreamMetricsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v2.StreamMetricsResponse"; -} - -message StreamMetricsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v2.StreamMetricsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v2.StreamMetricsMessage.Identifier"; - - // The node sending metrics over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // A list of metric entries - repeated io.prometheus.client.MetricFamily envoy_metrics = 2; -} diff --git a/generated_api_shadow/envoy/service/ratelimit/v2/BUILD b/generated_api_shadow/envoy/service/ratelimit/v2/BUILD deleted file mode 100644 index eedc3e62b3b20..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/ratelimit:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto deleted file mode 100644 index cee8cd7bc3d5d..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto +++ /dev/null @@ -1,115 +0,0 @@ -syntax = "proto3"; - -package envoy.service.ratelimit.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/ratelimit/ratelimit.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate Limit Service (RLS)] - -service RateLimitService { - // Determine whether rate limiting should take place. - rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { - } -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -message RateLimitRequest { - // All rate limit requests must specify a domain. 
This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - string domain = 1; - - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2; - - // Rate limit requests can optionally specify the number of hits a request adds to the matched - // limit. If the value is not set in the message, a request increases the matched limit by 1. - uint32 hits_addend = 3; -} - -// A response from a ShouldRateLimit call. -message RateLimitResponse { - enum Code { - // The response code is not known. - UNKNOWN = 0; - - // The response code to notify that the number of requests are under limit. - OK = 1; - - // The response code to notify that the number of requests are over limit. - OVER_LIMIT = 2; - } - - // Defines an actual rate limit in terms of requests per unit of time and the unit itself. - message RateLimit { - enum Unit { - // The time unit is not known. - UNKNOWN = 0; - - // The time unit representing a second. - SECOND = 1; - - // The time unit representing a minute. - MINUTE = 2; - - // The time unit representing an hour. - HOUR = 3; - - // The time unit representing a day. - DAY = 4; - } - - // A name or description of this limit. - string name = 3; - - // The number of requests per unit of time. - uint32 requests_per_unit = 1; - - // The unit of time. - Unit unit = 2; - } - - message DescriptorStatus { - // The response code for an individual descriptor. - Code code = 1; - - // The current limit as configured by the server. Useful for debugging, etc. - RateLimit current_limit = 2; - - // The limit remaining in the current time unit. 
- uint32 limit_remaining = 3; - } - - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. - Code overall_code = 1; - - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. - repeated DescriptorStatus statuses = 2; - - // A list of headers to add to the response - repeated api.v2.core.HeaderValue headers = 3 - [(udpa.annotations.field_migrate).rename = "response_headers_to_add"]; - - // A list of headers to add to the request when forwarded - repeated api.v2.core.HeaderValue request_headers_to_add = 4; -} diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/BUILD b/generated_api_shadow/envoy/service/ratelimit/v3/BUILD deleted file mode 100644 index 222b9ac522924..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/service/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto deleted file mode 100644 index ab8e0ffc0eba7..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto +++ /dev/null @@ -1,196 +0,0 @@ -syntax = "proto3"; - -package envoy.service.ratelimit.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.ratelimit.v3"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate Limit Service (RLS)] - -service RateLimitService { - // Determine whether rate limiting should take place. - rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { - } -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. 
When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -message RateLimitRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitRequest"; - - // All rate limit requests must specify a domain. This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - string domain = 1; - - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - repeated envoy.extensions.common.ratelimit.v3.RateLimitDescriptor descriptors = 2; - - // Rate limit requests can optionally specify the number of hits a request adds to the matched - // limit. If the value is not set in the message, a request increases the matched limit by 1. - uint32 hits_addend = 3; -} - -// A response from a ShouldRateLimit call. -// [#next-free-field: 7] -message RateLimitResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitResponse"; - - enum Code { - // The response code is not known. - UNKNOWN = 0; - - // The response code to notify that the number of requests are under limit. - OK = 1; - - // The response code to notify that the number of requests are over limit. - OVER_LIMIT = 2; - } - - // Defines an actual rate limit in terms of requests per unit of time and the unit itself. - message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit"; - - // Identifies the unit of of time for rate limit. - // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4] - enum Unit { - // The time unit is not known. 
- UNKNOWN = 0; - - // The time unit representing a second. - SECOND = 1; - - // The time unit representing a minute. - MINUTE = 2; - - // The time unit representing an hour. - HOUR = 3; - - // The time unit representing a day. - DAY = 4; - } - - // A name or description of this limit. - string name = 3; - - // The number of requests per unit of time. - uint32 requests_per_unit = 1; - - // The unit of time. - Unit unit = 2; - } - - // Cacheable quota for responses, see documentation for the :ref:`quota - // ` field. - // [#not-implemented-hide:] - message Quota { - // Number of matching requests granted in quota. Must be 1 or more. - uint32 requests = 1 [(validate.rules).uint32 = {gt: 0}]; - - oneof expiration_specifier { - // Point in time at which the quota expires. - google.protobuf.Timestamp valid_until = 2; - } - } - - // [#next-free-field: 6] - message DescriptorStatus { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitResponse.DescriptorStatus"; - - // The response code for an individual descriptor. - Code code = 1; - - // The current limit as configured by the server. Useful for debugging, etc. - RateLimit current_limit = 2; - - // The limit remaining in the current time unit. - uint32 limit_remaining = 3; - - // Duration until reset of the current limit window. - google.protobuf.Duration duration_until_reset = 4; - - // Quota granted for the descriptor. This is a certain number of requests over a period of time. - // The client may cache this result and apply the effective RateLimitResponse to future matching - // requests containing a matching descriptor without querying rate limit service. - // - // Quota is available for a request if its descriptor set has cached quota available for all - // descriptors. - // - // If quota is available, a RLS request will not be made and the quota will be reduced by 1 for - // all matching descriptors. 
- // - // If there is not sufficient quota, there are three cases: - // 1. A cached entry exists for a RLS descriptor that is out-of-quota, but not expired. - // In this case, the request will be treated as OVER_LIMIT. - // 2. Some RLS descriptors have a cached entry that has valid quota but some RLS descriptors - // have no cached entry. This will trigger a new RLS request. - // When the result is returned, a single unit will be consumed from the quota for all - // matching descriptors. - // If the server did not provide a quota, such as the quota message is empty for some of - // the descriptors, then the request admission is determined by the - // :ref:`overall_code `. - // 3. All RLS descriptors lack a cached entry, this will trigger a new RLS request, - // When the result is returned, a single unit will be consumed from the quota for all - // matching descriptors. - // If the server did not provide a quota, such as the quota message is empty for some of - // the descriptors, then the request admission is determined by the - // :ref:`overall_code `. - // - // When quota expires due to timeout, a new RLS request will also be made. - // The implementation may choose to preemptively query the rate limit server for more quota on or - // before expiration or before the available quota runs out. - // [#not-implemented-hide:] - Quota quota = 5; - } - - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. - Code overall_code = 1; - - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. 
- repeated DescriptorStatus statuses = 2; - - // A list of headers to add to the response - repeated config.core.v3.HeaderValue response_headers_to_add = 3; - - // A list of headers to add to the request when forwarded - repeated config.core.v3.HeaderValue request_headers_to_add = 4; - - // A response body to send to the downstream client when the response code is not OK. - bytes raw_body = 5; - - // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata lives in a namespace specified by the canonical name of extension filter - // that requires it: - // - // - :ref:`envoy.filters.http.ratelimit ` for HTTP filter. - // - :ref:`envoy.filters.network.ratelimit ` for network filter. - // - :ref:`envoy.filters.thrift.rate_limit ` for Thrift filter. - google.protobuf.Struct dynamic_metadata = 6; -} diff --git a/generated_api_shadow/envoy/service/route/v3/BUILD b/generated_api_shadow/envoy/service/route/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/route/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/route/v3/rds.proto b/generated_api_shadow/envoy/service/route/v3/rds.proto deleted file mode 100644 index 62a7da4094936..0000000000000 --- a/generated_api_shadow/envoy/service/route/v3/rds.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.service.route.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.route.v3"; -option java_outer_classname = "RdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RDS] - -// The resource_names field in DiscoveryRequest specifies a route configuration. -// This allows an Envoy configuration with multiple HTTP listeners (and -// associated HTTP connection manager filters) to use different route -// configurations. Each listener will bind its HTTP connection manager filter to -// a route table via this identifier. 
-service RouteDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.route.v3.RouteConfiguration"; - - rpc StreamRoutes(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaRoutes(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:routes"; - option (google.api.http).body = "*"; - } -} - -// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for -// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered -// during the processing of an HTTP request if a route for the request cannot be resolved. The -// :ref:`resource_names_subscribe ` -// field contains a list of virtual host names or aliases to track. The contents of an alias would -// be the contents of a *host* or *authority* header used to make an http request. An xDS server -// will match an alias to a virtual host based on the content of :ref:`domains' -// ` field. The *resource_names_unsubscribe* field -// contains a list of virtual host names that have been :ref:`unsubscribed -// ` from the routing table associated with the RouteConfiguration. -service VirtualHostDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.route.v3.VirtualHost"; - - rpc DeltaVirtualHosts(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message RdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/route/v3/srds.proto b/generated_api_shadow/envoy/service/route/v3/srds.proto deleted file mode 100644 index 64fe45fee1fab..0000000000000 --- a/generated_api_shadow/envoy/service/route/v3/srds.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.service.route.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.route.v3"; -option java_outer_classname = "SrdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SRDS] -// * Routing :ref:`architecture overview ` - -// The Scoped Routes Discovery Service (SRDS) API distributes -// :ref:`ScopedRouteConfiguration` -// resources. Each ScopedRouteConfiguration resource represents a "routing -// scope" containing a mapping that allows the HTTP connection manager to -// dynamically assign a routing table (specified via a -// :ref:`RouteConfiguration` message) to each -// HTTP request. 
-service ScopedRoutesDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.route.v3.ScopedRouteConfiguration"; - - rpc StreamScopedRoutes(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaScopedRoutes(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchScopedRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:scoped-routes"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message SrdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.SrdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/runtime/v3/BUILD b/generated_api_shadow/envoy/service/runtime/v3/BUILD deleted file mode 100644 index fb6a1656ca9bf..0000000000000 --- a/generated_api_shadow/envoy/service/runtime/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto b/generated_api_shadow/envoy/service/runtime/v3/rtds.proto deleted file mode 100644 index 796b6fac24e67..0000000000000 --- a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.service.runtime.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.runtime.v3"; -option java_outer_classname = "RtdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Runtime Discovery Service (RTDS)] -// RTDS :ref:`configuration overview ` - -// Discovery service for Runtime resources. -service RuntimeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.service.runtime.v3.Runtime"; - - rpc StreamRuntime(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaRuntime(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchRuntime(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:runtime"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.RtdsDummy"; -} - -// RTDS resource type. This describes a layer in the runtime virtual filesystem. -message Runtime { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.Runtime"; - - // Runtime resource name. This makes the Runtime a self-describing xDS - // resource. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - google.protobuf.Struct layer = 2; -} diff --git a/generated_api_shadow/envoy/service/secret/v3/BUILD b/generated_api_shadow/envoy/service/secret/v3/BUILD deleted file mode 100644 index fb6a1656ca9bf..0000000000000 --- a/generated_api_shadow/envoy/service/secret/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/secret/v3/sds.proto b/generated_api_shadow/envoy/service/secret/v3/sds.proto deleted file mode 100644 index 3c9441d7c7608..0000000000000 --- a/generated_api_shadow/envoy/service/secret/v3/sds.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.service.secret.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.secret.v3"; -option java_outer_classname = "SdsProto"; -option java_multiple_files = 
true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Secret Discovery Service (SDS)] - -service SecretDiscoveryService { - option (envoy.annotations.resource).type = "envoy.extensions.transport_sockets.tls.v3.Secret"; - - rpc DeltaSecrets(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc StreamSecrets(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc FetchSecrets(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:secrets"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message SdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.SdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/status/v2/BUILD b/generated_api_shadow/envoy/service/status/v2/BUILD deleted file mode 100644 index 39c38eb10a7cb..0000000000000 --- a/generated_api_shadow/envoy/service/status/v2/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/status/v2/csds.proto b/generated_api_shadow/envoy/service/status/v2/csds.proto deleted file mode 100644 index 10f603cedb15a..0000000000000 --- a/generated_api_shadow/envoy/service/status/v2/csds.proto +++ /dev/null @@ -1,88 +0,0 @@ -syntax = "proto3"; - -package envoy.service.status.v2; - -import "envoy/admin/v2alpha/config_dump.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/node.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.status.v2"; -option java_outer_classname = "CsdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Client Status Discovery Service (CSDS)] - -// CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. In the -// future, it can potentially be used as an interface to get the current -// state directly from the client. -service ClientStatusDiscoveryService { - rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { - } - - rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { - option (google.api.http).post = "/v2/discovery:client_status"; - option (google.api.http).body = "*"; - } -} - -// Status of a config. -enum ConfigStatus { - // Status info is not available/unknown. - UNKNOWN = 0; - - // Management server has sent the config to client and received ACK. 
- SYNCED = 1; - - // Config is not sent. - NOT_SENT = 2; - - // Management server has sent the config to client but hasn’t received - // ACK/NACK. - STALE = 3; - - // Management server has sent the config to client but received NACK. - ERROR = 4; -} - -// Request for client status of clients identified by a list of NodeMatchers. -message ClientStatusRequest { - // Management server can use these match criteria to identify clients. - // The match follows OR semantics. - repeated type.matcher.NodeMatcher node_matchers = 1; -} - -// Detailed config (per xDS) with status. -// [#next-free-field: 6] -message PerXdsConfig { - ConfigStatus status = 1; - - oneof per_xds_config { - admin.v2alpha.ListenersConfigDump listener_config = 2; - - admin.v2alpha.ClustersConfigDump cluster_config = 3; - - admin.v2alpha.RoutesConfigDump route_config = 4; - - admin.v2alpha.ScopedRoutesConfigDump scoped_route_config = 5; - } -} - -// All xds configs for a particular client. -message ClientConfig { - // Node for a particular client. - api.v2.core.Node node = 1; - - repeated PerXdsConfig xds_config = 2; -} - -message ClientStatusResponse { - // Client configs for the clients specified in the ClientStatusRequest. - repeated ClientConfig config = 1; -} diff --git a/generated_api_shadow/envoy/service/status/v3/BUILD b/generated_api_shadow/envoy/service/status/v3/BUILD deleted file mode 100644 index a73963967ef77..0000000000000 --- a/generated_api_shadow/envoy/service/status/v3/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/admin/v3:pkg", - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/service/status/v2:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/status/v3/csds.proto b/generated_api_shadow/envoy/service/status/v3/csds.proto deleted file mode 100644 index 1d940d6a2dfe1..0000000000000 --- a/generated_api_shadow/envoy/service/status/v3/csds.proto +++ /dev/null @@ -1,191 +0,0 @@ -syntax = "proto3"; - -package envoy.service.status.v3; - -import "envoy/admin/v3/config_dump.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/type/matcher/v3/node.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.status.v3"; -option java_outer_classname = "CsdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Client Status Discovery Service (CSDS)] - -// CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. It can -// also be used to get the current xDS states directly from the client. 
-service ClientStatusDiscoveryService { - rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { - } - - rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { - option (google.api.http).post = "/v3/discovery:client_status"; - option (google.api.http).body = "*"; - } -} - -// Status of a config from a management server view. -enum ConfigStatus { - // Status info is not available/unknown. - UNKNOWN = 0; - - // Management server has sent the config to client and received ACK. - SYNCED = 1; - - // Config is not sent. - NOT_SENT = 2; - - // Management server has sent the config to client but hasn’t received - // ACK/NACK. - STALE = 3; - - // Management server has sent the config to client but received NACK. The - // attached config dump will be the latest config (the rejected one), since - // it is the persisted version in the management server. - ERROR = 4; -} - -// Config status from a client-side view. -enum ClientConfigStatus { - // Config status is not available/unknown. - CLIENT_UNKNOWN = 0; - - // Client requested the config but hasn't received any config from management - // server yet. - CLIENT_REQUESTED = 1; - - // Client received the config and replied with ACK. - CLIENT_ACKED = 2; - - // Client received the config and replied with NACK. Notably, the attached - // config dump is not the NACKed version, but the most recent accepted one. If - // no config is accepted yet, the attached config dump will be empty. - CLIENT_NACKED = 3; -} - -// Request for client status of clients identified by a list of NodeMatchers. -message ClientStatusRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.ClientStatusRequest"; - - // Management server can use these match criteria to identify clients. - // The match follows OR semantics. - repeated type.matcher.v3.NodeMatcher node_matchers = 1; - - // The node making the csds request. 
- config.core.v3.Node node = 2; -} - -// Detailed config (per xDS) with status. -// [#next-free-field: 8] -message PerXdsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.PerXdsConfig"; - - // Config status generated by management servers. Will not be present if the - // CSDS server is an xDS client. - ConfigStatus status = 1; - - // Client config status is populated by xDS clients. Will not be present if - // the CSDS server is an xDS server. No matter what the client config status - // is, xDS clients should always dump the most recent accepted xDS config. - // - // .. attention:: - // This field is deprecated. Use :ref:`ClientResourceStatus - // ` for per-resource - // config status instead. - ClientConfigStatus client_status = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - oneof per_xds_config { - admin.v3.ListenersConfigDump listener_config = 2; - - admin.v3.ClustersConfigDump cluster_config = 3; - - admin.v3.RoutesConfigDump route_config = 4; - - admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; - - admin.v3.EndpointsConfigDump endpoint_config = 6; - } -} - -// All xds configs for a particular client. -message ClientConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.ClientConfig"; - - // GenericXdsConfig is used to specify the config status and the dump - // of any xDS resource identified by their type URL. It is the generalized - // version of the now deprecated ListenersConfigDump, ClustersConfigDump etc - // [#next-free-field: 10] - message GenericXdsConfig { - // Type_url represents the fully qualified name of xDS resource type - // like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. - string type_url = 1; - - // Name of the xDS resource - string name = 2; - - // This is the :ref:`version_info ` - // in the last processed xDS discovery response. 
If there are only - // static bootstrap listeners, this field will be "" - string version_info = 3; - - // The xDS resource config. Actual content depends on the type - google.protobuf.Any xds_config = 4; - - // Timestamp when the xDS resource was last updated - google.protobuf.Timestamp last_updated = 5; - - // Per xDS resource config status. It is generated by management servers. - // It will not be present if the CSDS server is an xDS client. - ConfigStatus config_status = 6; - - // Per xDS resource status from the view of a xDS client - admin.v3.ClientResourceStatus client_status = 7; - - // Set if the last update failed, cleared after the next successful - // update. The *error_state* field contains the rejected version of - // this particular resource along with the reason and timestamp. For - // successfully updated or acknowledged resource, this field should - // be empty. - // [#not-implemented-hide:] - admin.v3.UpdateFailureState error_state = 8; - - // Is static resource is true if it is specified in the config supplied - // through the file at the startup. - bool is_static_resource = 9; - } - - // Node for a particular client. - config.core.v3.Node node = 1; - - // This field is deprecated in favor of generic_xds_configs which is - // much simpler and uniform in structure. - repeated PerXdsConfig xds_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Represents generic xDS config and the exact config structure depends on - // the type URL (like Cluster if it is CDS) - repeated GenericXdsConfig generic_xds_configs = 3; -} - -message ClientStatusResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.ClientStatusResponse"; - - // Client configs for the clients specified in the ClientStatusRequest. 
- repeated ClientConfig config = 1; -} diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/BUILD b/generated_api_shadow/envoy/service/tap/v2alpha/BUILD deleted file mode 100644 index 8e0561a169c5a..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/data/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/common.proto b/generated_api_shadow/envoy/service/tap/v2alpha/common.proto deleted file mode 100644 index 990a3826481bd..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/common.proto +++ /dev/null @@ -1,205 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/grpc_service.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.tap.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common tap configuration] - -// Tap configuration. -message TapConfig { - // [#comment:TODO(mattklein123): Rate limiting] - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. 
- MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; - - // The tap output configuration. If a match configuration matches a data source being tapped, - // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for - // which the tap matching is enabled. When not enabled, the request\connection will not be - // recorded. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - api.v2.core.RuntimeFractionalPercent tap_enabled = 3; -} - -// Tap match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 9] -message MatchPredicate { - // A set of match configurations used for logical operations. - message MatchSet { - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. 
- HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - // HTTP headers to match. - repeated api.v2.route.HeaderMatcher headers = 1; -} - -// Tap output configuration. -message OutputConfig { - // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple - // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; - - // For buffered tapping, the maximum amount of received body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_rx_bytes = 2; - - // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_tx_bytes = 3; - - // Indicates whether taps produce a single buffered message per tap, or multiple streamed - // messages per tap in the emitted :ref:`TraceWrapper - // ` messages. Note that streamed tapping does not - // mean that no buffering takes place. Buffering may be required if data is processed before a - // match can be determined. See the HTTP tap filter :ref:`streaming - // ` documentation for more information. - bool streaming = 4; -} - -// Tap output sink configuration. -message OutputSink { - // Output format. All output is in the form of one or more :ref:`TraceWrapper - // ` messages. This enumeration indicates - // how those messages are written. Note that not all sinks support all output formats. See - // individual sink documentation for more information. 
- enum Format { - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_bytes - // ` field. This means that body data will be - // base64 encoded as per the `proto3 JSON mappings - // `_. - JSON_BODY_AS_BYTES = 0; - - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_string - // ` field. This means that body data will be - // string encoded as per the `proto3 JSON mappings - // `_. This format type is - // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the - // user wishes to view it directly without being forced to base64 decode the body. - JSON_BODY_AS_STRING = 1; - - // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes - // multiple binary messages without any length information the data stream will not be - // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) - // this output format makes consumption simpler. - PROTO_BINARY = 2; - - // Messages are written as a sequence tuples, where each tuple is the message length encoded - // as a `protobuf 32-bit varint - // `_ - // followed by the binary message. The messages can be read back using the language specific - // protobuf coded stream implementation to obtain the message length and the message. - PROTO_BINARY_LENGTH_DELIMITED = 3; - - // Text proto format. - PROTO_TEXT = 4; - } - - // Sink output format. - Format format = 1 [(validate.rules).enum = {defined_only: true}]; - - oneof output_sink_type { - option (validate.required) = true; - - // Tap output will be streamed out the :http:post:`/tap` admin endpoint. - // - // .. attention:: - // - // It is only allowed to specify the streaming admin output sink if the tap is being - // configured from the :http:post:`/tap` admin endpoint. 
Thus, if an extension has - // been configured to receive tap configuration from some other source (e.g., static - // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 2; - - // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 3; - - // [#not-implemented-hide:] - // GrpcService to stream data to. The format argument must be PROTO_BINARY. - StreamingGrpcSink streaming_grpc = 4; - } -} - -// Streaming admin sink configuration. -message StreamingAdminSink { -} - -// The file per tap sink outputs a discrete file for every tapped stream. -message FilePerTapSink { - // Path prefix. The output file will be of the form _.pb, where is an - // identifier distinguishing the recorded trace for stream instances (the Envoy - // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; -} - -// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC -// server. -message StreamingGrpcSink { - // Opaque identifier, that will be sent back to the streaming grpc server. - string tap_id = 1; - - // The gRPC server that hosts the Tap Sink Service. 
- api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto deleted file mode 100644 index 9fd18eae5d361..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/tap/v2alpha/wrapper.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap Sink Service] - -// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call -// StreamTaps to deliver captured taps to the server -service TapSinkService { - // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. - rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { - } -} - -// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server -// and stream taps without ever expecting a response. -message StreamTapsRequest { - message Identifier { - // The node sending taps over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - - // The opaque identifier that was set in the :ref:`output config - // `. - string tap_id = 2; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. 
- Identifier identifier = 1; - - // The trace id. this can be used to merge together a streaming trace. Note that the trace_id - // is not guaranteed to be spatially or temporally unique. - uint64 trace_id = 2; - - // The trace data. - data.tap.v2alpha.TraceWrapper trace = 3; -} - -// [#not-implemented-hide:] -message StreamTapsResponse { -} diff --git a/generated_api_shadow/envoy/service/tap/v3/BUILD b/generated_api_shadow/envoy/service/tap/v3/BUILD deleted file mode 100644 index 5ee1ce553f48b..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/tap/v3:pkg", - "//envoy/service/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/tap/v3/tap.proto b/generated_api_shadow/envoy/service/tap/v3/tap.proto deleted file mode 100644 index 5d9866e570747..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v3/tap.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/tap/v3/wrapper.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap Sink Service] - -// [#not-implemented-hide:] A tap service to receive incoming taps. 
Envoy will call -// StreamTaps to deliver captured taps to the server -service TapSinkService { - // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. - rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { - } -} - -// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server -// and stream taps without ever expecting a response. -message StreamTapsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamTapsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamTapsRequest.Identifier"; - - // The node sending taps over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - - // The opaque identifier that was set in the :ref:`output config - // `. - string tap_id = 2; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // The trace id. this can be used to merge together a streaming trace. Note that the trace_id - // is not guaranteed to be spatially or temporally unique. - uint64 trace_id = 2; - - // The trace data. - data.tap.v3.TraceWrapper trace = 3; -} - -// [#not-implemented-hide:] -message StreamTapsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamTapsResponse"; -} diff --git a/generated_api_shadow/envoy/service/trace/v2/BUILD b/generated_api_shadow/envoy/service/trace/v2/BUILD deleted file mode 100644 index 7e6d2b11bf163..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - ], -) diff --git a/generated_api_shadow/envoy/service/trace/v2/trace_service.proto b/generated_api_shadow/envoy/service/trace/v2/trace_service.proto deleted file mode 100644 index 4e07f9e1f609b..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v2/trace_service.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.service.trace.v2; - -import "envoy/api/v2/core/base.proto"; - -import "opencensus/proto/trace/v1/trace.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.trace.v2"; -option java_outer_classname = "TraceServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Trace service] - -// Service for streaming traces to server that consumes the trace data. It -// uses OpenCensus data model as a standard to represent trace information. -service TraceService { - // Envoy will connect and send StreamTracesMessage messages forever. It does - // not expect any response to be sent as nothing would be done in the case - // of failure. - rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { - } -} - -message StreamTracesResponse { -} - -message StreamTracesMessage { - message Identifier { - // The node sending the access log messages over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. 
- // As a performance optimization this will only be sent in the first message - // on the stream. - Identifier identifier = 1; - - // A list of Span entries - repeated opencensus.proto.trace.v1.Span spans = 2; -} diff --git a/generated_api_shadow/envoy/service/trace/v3/BUILD b/generated_api_shadow/envoy/service/trace/v3/BUILD deleted file mode 100644 index a00d454ff9749..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - ], -) diff --git a/generated_api_shadow/envoy/service/trace/v3/trace_service.proto b/generated_api_shadow/envoy/service/trace/v3/trace_service.proto deleted file mode 100644 index 65970593d7867..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v3/trace_service.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.service.trace.v3; - -import "envoy/config/core/v3/base.proto"; - -import "opencensus/proto/trace/v1/trace.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.trace.v3"; -option java_outer_classname = "TraceServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Trace service] - -// Service for streaming traces to server that consumes the trace data. It -// uses OpenCensus data model as a standard to represent trace information. -service TraceService { - // Envoy will connect and send StreamTracesMessage messages forever. 
It does - // not expect any response to be sent as nothing would be done in the case - // of failure. - rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { - } -} - -message StreamTracesResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v2.StreamTracesResponse"; -} - -message StreamTracesMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v2.StreamTracesMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v2.StreamTracesMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. - // As a performance optimization this will only be sent in the first message - // on the stream. - Identifier identifier = 1; - - // A list of Span entries - repeated opencensus.proto.trace.v1.Span spans = 2; -} diff --git a/generated_api_shadow/envoy/type/BUILD b/generated_api_shadow/envoy/type/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/type/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/type/hash_policy.proto b/generated_api_shadow/envoy/type/hash_policy.proto deleted file mode 100644 index b6aeb31fcbfde..0000000000000 --- a/generated_api_shadow/envoy/type/hash_policy.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "HashPolicyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Hash Policy] - -// Specifies the hash policy -message HashPolicy { - // The source IP will be used to compute the hash used by hash-based load balancing - // algorithms. - message SourceIp { - } - - oneof policy_specifier { - option (validate.required) = true; - - SourceIp source_ip = 1; - } -} diff --git a/generated_api_shadow/envoy/type/http.proto b/generated_api_shadow/envoy/type/http.proto deleted file mode 100644 index c1c787411fad8..0000000000000 --- a/generated_api_shadow/envoy/type/http.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP] - -enum CodecClientType { - HTTP1 = 0; - - HTTP2 = 1; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. 
- HTTP3 = 2; -} diff --git a/generated_api_shadow/envoy/type/http/v3/BUILD b/generated_api_shadow/envoy/type/http/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/type/http/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/type/http/v3/path_transformation.proto b/generated_api_shadow/envoy/type/http/v3/path_transformation.proto deleted file mode 100644 index 0b3d72009f5ff..0000000000000 --- a/generated_api_shadow/envoy/type/http/v3/path_transformation.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.type.http.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.http.v3"; -option java_outer_classname = "PathTransformationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Path Transformations API] - -// PathTransformation defines an API to apply a sequence of operations that can be used to alter -// text before it is used for matching or routing. Multiple actions can be applied in the same -// Transformation, forming a sequential pipeline. The transformations will be performed in the order -// that they appear. -// -// This API is a work in progress. - -message PathTransformation { - // A type of operation to alter text. - message Operation { - // Should text be normalized according to RFC 3986? This typically is used for path headers - // before any processing of requests by HTTP filters or routing. This applies percent-encoded - // normalization and path segment normalization. Fails on characters disallowed in URLs - // (e.g. NULLs). 
See `Normalization and Comparison - // `_ for details of normalization. Note that - // this options does not perform `case normalization - // `_ - message NormalizePathRFC3986 { - } - - // Determines if adjacent slashes are merged into one. A common use case is for a request path - // header. Using this option in `:ref: PathNormalizationOptions - // ` - // will allow incoming requests with path `//dir///file` to match against route with `prefix` - // match set to `/dir`. When using for header transformations, note that slash merging is not - // part of `HTTP spec `_ and is provided for convenience. - message MergeSlashes { - } - - oneof operation_specifier { - option (validate.required) = true; - - // Enable path normalization per RFC 3986. - NormalizePathRFC3986 normalize_path_rfc_3986 = 2; - - // Enable merging adjacent slashes. - MergeSlashes merge_slashes = 3; - } - } - - // A list of operations to apply. Transformations will be performed in the order that they appear. - repeated Operation operations = 1; -} diff --git a/generated_api_shadow/envoy/type/http_status.proto b/generated_api_shadow/envoy/type/http_status.proto deleted file mode 100644 index 99b44a98c2512..0000000000000 --- a/generated_api_shadow/envoy/type/http_status.proto +++ /dev/null @@ -1,139 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "HttpStatusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP status codes] - -// HTTP response codes supported in Envoy. -// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml -enum StatusCode { - // Empty - This code not part of the HTTP status code specification, but it is needed for proto - // `enum` type. 
- Empty = 0; - - Continue = 100; - - OK = 200; - - Created = 201; - - Accepted = 202; - - NonAuthoritativeInformation = 203; - - NoContent = 204; - - ResetContent = 205; - - PartialContent = 206; - - MultiStatus = 207; - - AlreadyReported = 208; - - IMUsed = 226; - - MultipleChoices = 300; - - MovedPermanently = 301; - - Found = 302; - - SeeOther = 303; - - NotModified = 304; - - UseProxy = 305; - - TemporaryRedirect = 307; - - PermanentRedirect = 308; - - BadRequest = 400; - - Unauthorized = 401; - - PaymentRequired = 402; - - Forbidden = 403; - - NotFound = 404; - - MethodNotAllowed = 405; - - NotAcceptable = 406; - - ProxyAuthenticationRequired = 407; - - RequestTimeout = 408; - - Conflict = 409; - - Gone = 410; - - LengthRequired = 411; - - PreconditionFailed = 412; - - PayloadTooLarge = 413; - - URITooLong = 414; - - UnsupportedMediaType = 415; - - RangeNotSatisfiable = 416; - - ExpectationFailed = 417; - - MisdirectedRequest = 421; - - UnprocessableEntity = 422; - - Locked = 423; - - FailedDependency = 424; - - UpgradeRequired = 426; - - PreconditionRequired = 428; - - TooManyRequests = 429; - - RequestHeaderFieldsTooLarge = 431; - - InternalServerError = 500; - - NotImplemented = 501; - - BadGateway = 502; - - ServiceUnavailable = 503; - - GatewayTimeout = 504; - - HTTPVersionNotSupported = 505; - - VariantAlsoNegotiates = 506; - - InsufficientStorage = 507; - - LoopDetected = 508; - - NotExtended = 510; - - NetworkAuthenticationRequired = 511; -} - -// HTTP status. -message HttpStatus { - // Supplies HTTP response code. - StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/BUILD b/generated_api_shadow/envoy/type/matcher/BUILD deleted file mode 100644 index 29613b4c3487b..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/matcher/metadata.proto b/generated_api_shadow/envoy/type/matcher/metadata.proto deleted file mode 100644 index ed58d04adb021..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/metadata.proto +++ /dev/null @@ -1,98 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/value.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metadata matcher] - -// MetadataMatcher provides a general interface to check if a given value is matched in -// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value -// from the Metadata and then check if it's matched to the specified value. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.filters.http.rbac: -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following MetadataMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. 
-// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to -// enforce access control based on dynamic metadata in a request. See :ref:`Permission -// ` and :ref:`Principal -// `. - -// [#next-major-version: MetadataMatcher should use StructMatcher] -message MetadataMatcher { - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/node.proto b/generated_api_shadow/envoy/type/matcher/node.proto deleted file mode 100644 index c9e84a46279ab..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/node.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/string.proto"; -import "envoy/type/matcher/struct.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Node matcher] - -// Specifies the way to match a Node. -// The match follows AND semantics. -message NodeMatcher { - // Specifies match criteria on the node id. - StringMatcher node_id = 1; - - // Specifies match criteria on the node metadata. - repeated StructMatcher node_metadatas = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/number.proto b/generated_api_shadow/envoy/type/matcher/number.proto deleted file mode 100644 index e488f16a4a0c9..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/number.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/range.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "NumberProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Number matcher] - -// Specifies the way to match a double value. -message DoubleMatcher { - oneof match_pattern { - option (validate.required) = true; - - // If specified, the input double value must be in the range specified here. 
- // Note: The range is using half-open interval semantics [start, end). - DoubleRange range = 1; - - // If specified, the input double value must be equal to the value specified here. - double exact = 2; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/path.proto b/generated_api_shadow/envoy/type/matcher/path.proto deleted file mode 100644 index 860a1c69f18a8..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/path.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "PathProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Path matcher] - -// Specifies the way to match a path on HTTP request. -message PathMatcher { - oneof rule { - option (validate.required) = true; - - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
- StringMatcher path = 1 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto deleted file mode 100644 index 6c499235bbe23..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Regex matcher] - -// A regex matcher designed for safety when used with untrusted input. -message RegexMatcher { - // Google's `RE2 `_ regex engine. The regex string must adhere to - // the documented `syntax `_. The engine is designed - // to complete execution in linear time as well as limit the amount of memory used. - // - // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` - // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or - // complexity that a compiled regex can have before an exception is thrown or a warning is - // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and - // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). - // - // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, - // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented - // each time the program size exceeds the warn level threshold. - message GoogleRE2 { - // This field controls the RE2 "program size" which is a rough estimate of how complex a - // compiled regex is to evaluate. 
A regex that has a program size greater than the configured - // value will fail to compile. In this case, the configured max program size can be increased - // or the regex can be simplified. If not specified, the default is 100. - // - // This field is deprecated; regexp validation should be performed on the management server - // instead of being done by each individual client. - google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; - } - - oneof engine_type { - option (validate.required) = true; - - // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; - } - - // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -message RegexMatchAndSubstitute { - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. - RegexMatcher pattern = 1; - - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. 
Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. - string substitution = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/string.proto b/generated_api_shadow/envoy/type/matcher/string.proto deleted file mode 100644 index 499eaf21775f8..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/string.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/regex.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: String matcher] - -// Specifies the way to match a string. -// [#next-free-field: 7] -message StringMatcher { - oneof match_pattern { - option (validate.required) = true; - - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_len: 1}]; - - // The input string must match the regular expression specified here. - // The regex grammar is defined `here - // `_. 
- // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - string regex = 4 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; - } - - // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no - // effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; -} - -// Specifies a list of ways to match a string. -message ListStringMatcher { - repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/struct.proto b/generated_api_shadow/envoy/type/matcher/struct.proto deleted file mode 100644 index 10d4672e0622b..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/struct.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/value.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Struct matcher] - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. 
It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. -// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. -message StructMatcher { - // Specifies the segment in a path to retrieve value from Struct. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The StructMatcher is matched if the value retrieved by path is matched to this value. - ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/BUILD b/generated_api_shadow/envoy/type/matcher/v3/BUILD deleted file mode 100644 index a117fd27e4ff0..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto b/generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto deleted file mode 100644 index 403e4676f7b96..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "HttpInputsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common HTTP Inputs] - -// Match input indicates that matching should be done on a specific request header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestHeaderMatchInput { - // The request header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific request trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestTrailerMatchInput { - // The request trailer to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicating that matching should be done on a specific response header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseHeaderMatchInput { - // The response header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific response trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseTrailerMatchInput { - // The response trailer to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto deleted file mode 100644 index de19a2f34dbd1..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto +++ /dev/null @@ -1,107 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metadata matcher] - -// MetadataMatcher provides a general interface to check if a given value is matched in -// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value -// from the Metadata and then check if it's matched to the specified value. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.filters.http.rbac: -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following MetadataMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. 
code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to -// enforce access control based on dynamic metadata in a request. See :ref:`Permission -// ` and :ref:`Principal -// `. - -// [#next-major-version: MetadataMatcher should use StructMatcher] -message MetadataMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.MetadataMatcher"; - - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.MetadataMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. - ValueMatcher value = 3 [(validate.rules).message = {required: true}]; - - // If true, the match result will be inverted. 
- bool invert = 4; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/node.proto b/generated_api_shadow/envoy/type/matcher/v3/node.proto deleted file mode 100644 index fe507312135ff..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/node.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/matcher/v3/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Node matcher] - -// Specifies the way to match a Node. -// The match follows AND semantics. -message NodeMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.NodeMatcher"; - - // Specifies match criteria on the node id. - StringMatcher node_id = 1; - - // Specifies match criteria on the node metadata. - repeated StructMatcher node_metadatas = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/number.proto b/generated_api_shadow/envoy/type/matcher/v3/number.proto deleted file mode 100644 index 2379efdcbd23a..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/number.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "NumberProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Number matcher] - -// Specifies the way to match a double value. 
-message DoubleMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.DoubleMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, the input double value must be in the range specified here. - // Note: The range is using half-open interval semantics [start, end). - type.v3.DoubleRange range = 1; - - // If specified, the input double value must be equal to the value specified here. - double exact = 2; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/path.proto b/generated_api_shadow/envoy/type/matcher/v3/path.proto deleted file mode 100644 index 0ce89871c9d9f..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/path.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "PathProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Path matcher] - -// Specifies the way to match a path on HTTP request. -message PathMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.PathMatcher"; - - oneof rule { - option (validate.required) = true; - - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
- StringMatcher path = 1 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto deleted file mode 100644 index 3e7bb477ecbf0..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Regex matcher] - -// A regex matcher designed for safety when used with untrusted input. -message RegexMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher"; - - // Google's `RE2 `_ regex engine. The regex string must adhere to - // the documented `syntax `_. The engine is designed - // to complete execution in linear time as well as limit the amount of memory used. - // - // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` - // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or - // complexity that a compiled regex can have before an exception is thrown or a warning is - // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and - // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
- // - // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, - // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented - // each time the program size exceeds the warn level threshold. - message GoogleRE2 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.RegexMatcher.GoogleRE2"; - - // This field controls the RE2 "program size" which is a rough estimate of how complex a - // compiled regex is to evaluate. A regex that has a program size greater than the configured - // value will fail to compile. In this case, the configured max program size can be increased - // or the regex can be simplified. If not specified, the default is 100. - // - // This field is deprecated; regexp validation should be performed on the management server - // instead of being done by each individual client. - google.protobuf.UInt32Value max_program_size = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - oneof engine_type { - option (validate.required) = true; - - // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; - } - - // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -message RegexMatchAndSubstitute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.RegexMatchAndSubstitute"; - - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. 
When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. - RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; - - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. 
- string substitution = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/string.proto b/generated_api_shadow/envoy/type/matcher/v3/string.proto deleted file mode 100644 index 4dc7cacffae6e..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/string.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/regex.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: String matcher] - -// Specifies the way to match a string. -// [#next-free-field: 8] -message StringMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_len: 1}]; - - // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; - - // The input string must have the substring specified here. 
- // Note: empty contains match is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_len: 1}]; - - string hidden_envoy_deprecated_regex = 4 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This - // has no effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; -} - -// Specifies a list of ways to match a string. -message ListStringMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.ListStringMatcher"; - - repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/struct.proto b/generated_api_shadow/envoy/type/matcher/v3/struct.proto deleted file mode 100644 index c753d07a5c0ac..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/struct.proto +++ /dev/null @@ -1,90 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Struct matcher] - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. 
-// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. -message StructMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StructMatcher"; - - // Specifies the segment in a path to retrieve value from Struct. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.StructMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The StructMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/value.proto b/generated_api_shadow/envoy/type/matcher/v3/value.proto deleted file mode 100644 index 040332273ba35..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/value.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/number.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "ValueProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Value matcher] - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -message ValueMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ValueMatcher"; - - // NullMatch is an empty message to specify a null value. - message NullMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.ValueMatcher.NullMatch"; - } - - // Specifies how to match a value. - oneof match_pattern { - option (validate.required) = true; - - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch null_match = 1; - - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. - DoubleMatcher double_match = 2; - - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. 
- StringMatcher string_match = 3; - - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - bool bool_match = 4; - - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - bool present_match = 5; - - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatcher list_match = 6; - } -} - -// Specifies the way to match a list value. -message ListMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ListMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, at least one of the values in the list must match the value specified. - ValueMatcher one_of = 1; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/value.proto b/generated_api_shadow/envoy/type/matcher/value.proto deleted file mode 100644 index aaecd14e8ecd4..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/value.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/number.proto"; -import "envoy/type/matcher/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "ValueProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Value matcher] - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -message ValueMatcher { - // NullMatch is an empty message to specify a null value. 
- message NullMatch { - } - - // Specifies how to match a value. - oneof match_pattern { - option (validate.required) = true; - - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch null_match = 1; - - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. - DoubleMatcher double_match = 2; - - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. - StringMatcher string_match = 3; - - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - bool bool_match = 4; - - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - bool present_match = 5; - - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatcher list_match = 6; - } -} - -// Specifies the way to match a list value. -message ListMatcher { - oneof match_pattern { - option (validate.required) = true; - - // If specified, at least one of the values in the list must match the value specified. - ValueMatcher one_of = 1; - } -} diff --git a/generated_api_shadow/envoy/type/metadata/v2/BUILD b/generated_api_shadow/envoy/type/metadata/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/type/metadata/v2/metadata.proto b/generated_api_shadow/envoy/type/metadata/v2/metadata.proto deleted file mode 100644 index 43a1a7ca92750..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v2/metadata.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package envoy.type.metadata.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.metadata.v2"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.type.metadata.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metadata] - -// MetadataKey provides a general interface using `key` and `path` to retrieve value from -// :ref:`Metadata `. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.xxx: -// prop: -// foo: bar -// xyz: -// hello: envoy -// -// The following MetadataKey will retrieve a string value "bar" from the Metadata. -// -// .. code-block:: yaml -// -// key: envoy.xxx -// path: -// - key: prop -// - key: foo -// -message MetadataKey { - // Specifies the segment in a path to retrieve value from Metadata. - // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - } - } - - // The key name of Metadata to retrieve the Struct from the metadata. 
- // Typically, it represents a builtin subsystem or custom extension. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The path to retrieve the Value from the Struct. It can be a prefix or a full path, - // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, - // which depends on the particular scenario. - // - // Note: Due to that only the key type segment is supported, the path can not specify a list - // unless the list is the last segment. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Describes what kind of metadata. -message MetadataKind { - // Represents dynamic metadata associated with the request. - message Request { - } - - // Represents metadata from :ref:`the route`. - message Route { - } - - // Represents metadata from :ref:`the upstream cluster`. - message Cluster { - } - - // Represents metadata from :ref:`the upstream - // host`. - message Host { - } - - oneof kind { - option (validate.required) = true; - - // Request kind of metadata. - Request request = 1; - - // Route kind of metadata. - Route route = 2; - - // Cluster kind of metadata. - Cluster cluster = 3; - - // Host kind of metadata. - Host host = 4; - } -} diff --git a/generated_api_shadow/envoy/type/metadata/v3/BUILD b/generated_api_shadow/envoy/type/metadata/v3/BUILD deleted file mode 100644 index aa64935f43d18..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/metadata/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto b/generated_api_shadow/envoy/type/metadata/v3/metadata.proto deleted file mode 100644 index 5dd58b23c6231..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto +++ /dev/null @@ -1,114 +0,0 @@ -syntax = "proto3"; - -package envoy.type.metadata.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.metadata.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metadata] - -// MetadataKey provides a general interface using `key` and `path` to retrieve value from -// :ref:`Metadata `. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.xxx: -// prop: -// foo: bar -// xyz: -// hello: envoy -// -// The following MetadataKey will retrieve a string value "bar" from the Metadata. -// -// .. code-block:: yaml -// -// key: envoy.xxx -// path: -// - key: prop -// - key: foo -// -message MetadataKey { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKey"; - - // Specifies the segment in a path to retrieve value from Metadata. - // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKey.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. 
- string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The key name of Metadata to retrieve the Struct from the metadata. - // Typically, it represents a builtin subsystem or custom extension. - string key = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. It can be a prefix or a full path, - // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, - // which depends on the particular scenario. - // - // Note: Due to that only the key type segment is supported, the path can not specify a list - // unless the list is the last segment. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Describes what kind of metadata. -message MetadataKind { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind"; - - // Represents dynamic metadata associated with the request. - message Request { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Request"; - } - - // Represents metadata from :ref:`the route`. - message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Route"; - } - - // Represents metadata from :ref:`the upstream cluster`. - message Cluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Cluster"; - } - - // Represents metadata from :ref:`the upstream - // host`. - message Host { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Host"; - } - - oneof kind { - option (validate.required) = true; - - // Request kind of metadata. - Request request = 1; - - // Route kind of metadata. - Route route = 2; - - // Cluster kind of metadata. - Cluster cluster = 3; - - // Host kind of metadata. 
- Host host = 4; - } -} diff --git a/generated_api_shadow/envoy/type/percent.proto b/generated_api_shadow/envoy/type/percent.proto deleted file mode 100644 index fc41a26662fe7..0000000000000 --- a/generated_api_shadow/envoy/type/percent.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "PercentProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Percent] - -// Identifies a percentage, in the range [0.0, 100.0]. -message Percent { - double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; -} - -// A fractional percentage is used in cases in which for performance reasons performing floating -// point to integer conversions during randomness calculations is undesirable. The message includes -// both a numerator and denominator that together determine the final fractional value. -// -// * **Example**: 1/100 = 1%. -// * **Example**: 3/10000 = 0.03%. -message FractionalPercent { - // Fraction percentages support several fixed denominator values. - enum DenominatorType { - // 100. - // - // **Example**: 1/100 = 1%. - HUNDRED = 0; - - // 10,000. - // - // **Example**: 1/10000 = 0.01%. - TEN_THOUSAND = 1; - - // 1,000,000. - // - // **Example**: 1/1000000 = 0.0001%. - MILLION = 2; - } - - // Specifies the numerator. Defaults to 0. - uint32 numerator = 1; - - // Specifies the denominator. If the denominator specified is less than the numerator, the final - // fractional percentage is capped at 1 (100%). 
- DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/type/range.proto b/generated_api_shadow/envoy/type/range.proto deleted file mode 100644 index 79aaa81975c38..0000000000000 --- a/generated_api_shadow/envoy/type/range.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "RangeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Range] - -// Specifies the int64 start and end of the range using half-open interval semantics [start, -// end). -message Int64Range { - // start of the range (inclusive) - int64 start = 1; - - // end of the range (exclusive) - int64 end = 2; -} - -// Specifies the int32 start and end of the range using half-open interval semantics [start, -// end). -message Int32Range { - // start of the range (inclusive) - int32 start = 1; - - // end of the range (exclusive) - int32 end = 2; -} - -// Specifies the double start and end of the range using half-open interval semantics [start, -// end). 
-message DoubleRange { - // start of the range (inclusive) - double start = 1; - - // end of the range (exclusive) - double end = 2; -} diff --git a/generated_api_shadow/envoy/type/semantic_version.proto b/generated_api_shadow/envoy/type/semantic_version.proto deleted file mode 100644 index 80fe016bfa161..0000000000000 --- a/generated_api_shadow/envoy/type/semantic_version.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "SemanticVersionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Semantic Version] - -// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate -// expected behaviors and APIs, the patch version field is used only -// for security fixes and can be generally ignored. -message SemanticVersion { - uint32 major_number = 1; - - uint32 minor_number = 2; - - uint32 patch = 3; -} diff --git a/generated_api_shadow/envoy/type/token_bucket.proto b/generated_api_shadow/envoy/type/token_bucket.proto deleted file mode 100644 index 41b6d268d5f6f..0000000000000 --- a/generated_api_shadow/envoy/type/token_bucket.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "TokenBucketProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Token bucket] - -// Configures a token bucket, typically used for rate limiting. -message TokenBucket { - // The maximum tokens that the bucket can hold. 
This is also the number of tokens that the bucket - // initially contains. - uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; - - // The number of tokens added to the bucket during each fill interval. If not specified, defaults - // to a single token. - google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The fill interval that tokens are added to the bucket. During each fill interval - // `tokens_per_fill` are added to the bucket. The bucket will never contain more than - // `max_tokens` tokens. - google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; -} diff --git a/generated_api_shadow/envoy/type/tracing/v2/BUILD b/generated_api_shadow/envoy/type/tracing/v2/BUILD deleted file mode 100644 index aa64935f43d18..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/metadata/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto deleted file mode 100644 index 7506ae8861254..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto3"; - -package envoy.type.tracing.v2; - -import "envoy/type/metadata/v2/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.tracing.v2"; -option java_outer_classname = "CustomTagProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Custom Tag] - -// Describes custom tags for the 
active span. -// [#next-free-field: 6] -message CustomTag { - // Literal type custom tag with static value for the tag value. - message Literal { - // Static literal value to populate the tag value. - string value = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // Environment type custom tag with environment name and default value. - message Environment { - // Environment variable name to obtain the value to populate the tag value. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When the environment variable is not found, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Header type custom tag with header name and default value. - message Header { - // Header name to obtain the value to populate the tag value. - string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When the header does not exist, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Metadata type custom tag using - // :ref:`MetadataKey ` to retrieve the protobuf value - // from :ref:`Metadata `, and populate the tag value with - // `the canonical JSON `_ - // representation of it. - message Metadata { - // Specify what kind of metadata to obtain tag value from. - metadata.v2.MetadataKind kind = 1; - - // Metadata key to define the path to retrieve the tag value. - metadata.v2.MetadataKey metadata_key = 2; - - // When no valid metadata is found, - // the tag value would be populated with this default value if specified, - // otherwise no tag would be populated. - string default_value = 3; - } - - // Used to populate the tag name. - string tag = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Used to specify what kind of custom tag. 
- oneof type { - option (validate.required) = true; - - // A literal custom tag. - Literal literal = 2; - - // An environment custom tag. - Environment environment = 3; - - // A request header custom tag. - Header request_header = 4; - - // A custom tag to obtain tag value from the metadata. - Metadata metadata = 5; - } -} diff --git a/generated_api_shadow/envoy/type/tracing/v3/BUILD b/generated_api_shadow/envoy/type/tracing/v3/BUILD deleted file mode 100644 index 38eb160d482bf..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto deleted file mode 100644 index ad99cafb22bf4..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; - -package envoy.type.tracing.v3; - -import "envoy/type/metadata/v3/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.tracing.v3"; -option java_outer_classname = "CustomTagProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Custom Tag] - -// Describes custom tags for the active span. -// [#next-free-field: 6] -message CustomTag { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag"; - - // Literal type custom tag with static value for the tag value. 
- message Literal { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Literal"; - - // Static literal value to populate the tag value. - string value = 1 [(validate.rules).string = {min_len: 1}]; - } - - // Environment type custom tag with environment name and default value. - message Environment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Environment"; - - // Environment variable name to obtain the value to populate the tag value. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When the environment variable is not found, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Header type custom tag with header name and default value. - message Header { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Header"; - - // Header name to obtain the value to populate the tag value. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When the header does not exist, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Metadata type custom tag using - // :ref:`MetadataKey ` to retrieve the protobuf value - // from :ref:`Metadata `, and populate the tag value with - // `the canonical JSON `_ - // representation of it. - message Metadata { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Metadata"; - - // Specify what kind of metadata to obtain tag value from. - metadata.v3.MetadataKind kind = 1; - - // Metadata key to define the path to retrieve the tag value. 
- metadata.v3.MetadataKey metadata_key = 2; - - // When no valid metadata is found, - // the tag value would be populated with this default value if specified, - // otherwise no tag would be populated. - string default_value = 3; - } - - // Used to populate the tag name. - string tag = 1 [(validate.rules).string = {min_len: 1}]; - - // Used to specify what kind of custom tag. - oneof type { - option (validate.required) = true; - - // A literal custom tag. - Literal literal = 2; - - // An environment custom tag. - Environment environment = 3; - - // A request header custom tag. - Header request_header = 4; - - // A custom tag to obtain tag value from the metadata. - Metadata metadata = 5; - } -} diff --git a/generated_api_shadow/envoy/type/v3/BUILD b/generated_api_shadow/envoy/type/v3/BUILD deleted file mode 100644 index da3a8659d2a8b..0000000000000 --- a/generated_api_shadow/envoy/type/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/v3/hash_policy.proto b/generated_api_shadow/envoy/type/v3/hash_policy.proto deleted file mode 100644 index 96c39299698fc..0000000000000 --- a/generated_api_shadow/envoy/type/v3/hash_policy.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "HashPolicyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Hash Policy] - -// Specifies the hash policy -message HashPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy"; - - // The source IP will be used to compute the hash used by hash-based load balancing - // algorithms. 
- message SourceIp { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy.SourceIp"; - } - - oneof policy_specifier { - option (validate.required) = true; - - SourceIp source_ip = 1; - } -} diff --git a/generated_api_shadow/envoy/type/v3/http.proto b/generated_api_shadow/envoy/type/v3/http.proto deleted file mode 100644 index fec15d11f871c..0000000000000 --- a/generated_api_shadow/envoy/type/v3/http.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP] - -enum CodecClientType { - HTTP1 = 0; - - HTTP2 = 1; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 2; -} diff --git a/generated_api_shadow/envoy/type/v3/http_status.proto b/generated_api_shadow/envoy/type/v3/http_status.proto deleted file mode 100644 index 8914b7a0264ae..0000000000000 --- a/generated_api_shadow/envoy/type/v3/http_status.proto +++ /dev/null @@ -1,142 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "HttpStatusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP status codes] - -// HTTP response codes supported in Envoy. 
-// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml -enum StatusCode { - // Empty - This code not part of the HTTP status code specification, but it is needed for proto - // `enum` type. - Empty = 0; - - Continue = 100; - - OK = 200; - - Created = 201; - - Accepted = 202; - - NonAuthoritativeInformation = 203; - - NoContent = 204; - - ResetContent = 205; - - PartialContent = 206; - - MultiStatus = 207; - - AlreadyReported = 208; - - IMUsed = 226; - - MultipleChoices = 300; - - MovedPermanently = 301; - - Found = 302; - - SeeOther = 303; - - NotModified = 304; - - UseProxy = 305; - - TemporaryRedirect = 307; - - PermanentRedirect = 308; - - BadRequest = 400; - - Unauthorized = 401; - - PaymentRequired = 402; - - Forbidden = 403; - - NotFound = 404; - - MethodNotAllowed = 405; - - NotAcceptable = 406; - - ProxyAuthenticationRequired = 407; - - RequestTimeout = 408; - - Conflict = 409; - - Gone = 410; - - LengthRequired = 411; - - PreconditionFailed = 412; - - PayloadTooLarge = 413; - - URITooLong = 414; - - UnsupportedMediaType = 415; - - RangeNotSatisfiable = 416; - - ExpectationFailed = 417; - - MisdirectedRequest = 421; - - UnprocessableEntity = 422; - - Locked = 423; - - FailedDependency = 424; - - UpgradeRequired = 426; - - PreconditionRequired = 428; - - TooManyRequests = 429; - - RequestHeaderFieldsTooLarge = 431; - - InternalServerError = 500; - - NotImplemented = 501; - - BadGateway = 502; - - ServiceUnavailable = 503; - - GatewayTimeout = 504; - - HTTPVersionNotSupported = 505; - - VariantAlsoNegotiates = 506; - - InsufficientStorage = 507; - - LoopDetected = 508; - - NotExtended = 510; - - NetworkAuthenticationRequired = 511; -} - -// HTTP status. -message HttpStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.HttpStatus"; - - // Supplies HTTP response code. 
- StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; -} diff --git a/generated_api_shadow/envoy/type/v3/percent.proto b/generated_api_shadow/envoy/type/v3/percent.proto deleted file mode 100644 index 3a89a3f44fd5f..0000000000000 --- a/generated_api_shadow/envoy/type/v3/percent.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "PercentProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Percent] - -// Identifies a percentage, in the range [0.0, 100.0]. -message Percent { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.Percent"; - - double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; -} - -// A fractional percentage is used in cases in which for performance reasons performing floating -// point to integer conversions during randomness calculations is undesirable. The message includes -// both a numerator and denominator that together determine the final fractional value. -// -// * **Example**: 1/100 = 1%. -// * **Example**: 3/10000 = 0.03%. -message FractionalPercent { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.FractionalPercent"; - - // Fraction percentages support several fixed denominator values. - enum DenominatorType { - // 100. - // - // **Example**: 1/100 = 1%. - HUNDRED = 0; - - // 10,000. - // - // **Example**: 1/10000 = 0.01%. - TEN_THOUSAND = 1; - - // 1,000,000. - // - // **Example**: 1/1000000 = 0.0001%. - MILLION = 2; - } - - // Specifies the numerator. Defaults to 0. - uint32 numerator = 1; - - // Specifies the denominator. 
If the denominator specified is less than the numerator, the final - // fractional percentage is capped at 1 (100%). - DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/type/v3/range.proto b/generated_api_shadow/envoy/type/v3/range.proto deleted file mode 100644 index de1d55b09a214..0000000000000 --- a/generated_api_shadow/envoy/type/v3/range.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "RangeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Range] - -// Specifies the int64 start and end of the range using half-open interval semantics [start, -// end). -message Int64Range { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int64Range"; - - // start of the range (inclusive) - int64 start = 1; - - // end of the range (exclusive) - int64 end = 2; -} - -// Specifies the int32 start and end of the range using half-open interval semantics [start, -// end). -message Int32Range { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int32Range"; - - // start of the range (inclusive) - int32 start = 1; - - // end of the range (exclusive) - int32 end = 2; -} - -// Specifies the double start and end of the range using half-open interval semantics [start, -// end). 
-message DoubleRange { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.DoubleRange"; - - // start of the range (inclusive) - double start = 1; - - // end of the range (exclusive) - double end = 2; -} diff --git a/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto b/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto deleted file mode 100644 index a3fb27ff47ba0..0000000000000 --- a/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "RatelimitUnitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Ratelimit Time Unit] - -// Identifies the unit of of time for rate limit. -enum RateLimitUnit { - // The time unit is not known. - UNKNOWN = 0; - - // The time unit representing a second. - SECOND = 1; - - // The time unit representing a minute. - MINUTE = 2; - - // The time unit representing an hour. - HOUR = 3; - - // The time unit representing a day. - DAY = 4; -} diff --git a/generated_api_shadow/envoy/type/v3/semantic_version.proto b/generated_api_shadow/envoy/type/v3/semantic_version.proto deleted file mode 100644 index a4126336f03ae..0000000000000 --- a/generated_api_shadow/envoy/type/v3/semantic_version.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "SemanticVersionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Semantic Version] - -// Envoy uses SemVer (https://semver.org/). 
Major/minor versions indicate -// expected behaviors and APIs, the patch version field is used only -// for security fixes and can be generally ignored. -message SemanticVersion { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.SemanticVersion"; - - uint32 major_number = 1; - - uint32 minor_number = 2; - - uint32 patch = 3; -} diff --git a/generated_api_shadow/envoy/type/v3/token_bucket.proto b/generated_api_shadow/envoy/type/v3/token_bucket.proto deleted file mode 100644 index a96d50fbd0abc..0000000000000 --- a/generated_api_shadow/envoy/type/v3/token_bucket.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "TokenBucketProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Token bucket] - -// Configures a token bucket, typically used for rate limiting. -message TokenBucket { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.TokenBucket"; - - // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket - // initially contains. - uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; - - // The number of tokens added to the bucket during each fill interval. If not specified, defaults - // to a single token. - google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The fill interval that tokens are added to the bucket. During each fill interval - // `tokens_per_fill` are added to the bucket. The bucket will never contain more than - // `max_tokens` tokens. 
- google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; -} diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/BUILD b/generated_api_shadow/envoy/watchdog/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/watchdog/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto b/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto deleted file mode 100644 index d6f34aa892cdb..0000000000000 --- a/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.watchdog.v3alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; -option java_outer_classname = "AbortActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] - -// A GuardDogAction that will terminate the process by killing the -// stuck thread. This would allow easier access to the call stack of the stuck -// thread since we would run signal handlers on that thread. By default -// this will be registered to run as the last watchdog action on KILL and -// MULTIKILL events if those are enabled. -message AbortActionConfig { - // How long to wait for the thread to respond to the thread kill function - // before killing the process from this action. This is a blocking action. 
- // By default this is 5 seconds. - google.protobuf.Duration wait_duration = 1; -} diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 950ea63a165cc..07443785ab89f 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -18,6 +18,7 @@ envoy_cc_library( "//source/common/common:thread_lib", "//source/common/event:dispatcher_lib", "//source/common/network:socket_lib", + "//source/common/stats:custom_stat_namespaces_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index 9a9e1e3fad096..7c904aa9a24b2 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -10,6 +10,8 @@ #include "envoy/network/socket.h" #include "envoy/thread/thread.h" +#include "source/common/stats/custom_stat_namespaces_impl.h" + namespace Envoy { namespace Api { @@ -36,6 +38,7 @@ class Impl : public Api { TimeSource& timeSource() override { return time_system_; } Stats::Scope& rootScope() override { return store_; } Random::RandomGenerator& randomGenerator() override { return random_generator_; } + Stats::CustomStatNamespaces& customStatNamespaces() override { return custom_stat_namespaces_; } const envoy::config::bootstrap::v3::Bootstrap& bootstrap() const override { return bootstrap_; } ProcessContextOptRef processContext() override { return process_context_; } @@ -46,6 +49,7 @@ class Impl : public Api { Filesystem::Instance& file_system_; Random::RandomGenerator& random_generator_; const envoy::config::bootstrap::v3::Bootstrap& bootstrap_; + Stats::CustomStatNamespacesImpl custom_stat_namespaces_; ProcessContextOptRef process_context_; const Buffer::WatermarkFactorySharedPtr watermark_factory_; }; diff --git a/source/common/api/posix/os_sys_calls_impl.cc b/source/common/api/posix/os_sys_calls_impl.cc index ad2a36dd2af1a..b0afd5a8fae07 100644 --- a/source/common/api/posix/os_sys_calls_impl.cc +++ b/source/common/api/posix/os_sys_calls_impl.cc @@ -282,5 
+282,20 @@ SysCallBoolResult OsSysCallsImpl::socketTcpInfo([[maybe_unused]] os_fd_t sockfd, return {false, EOPNOTSUPP}; } +bool OsSysCallsImpl::supportsGetifaddrs() const { +// https://android.googlesource.com/platform/prebuilts/ndk/+/dev/platform/sysroot/usr/include/ifaddrs.h +#if defined(__ANDROID_API__) && __ANDROID_API__ < 24 + return false; +#endif + return true; +} + +SysCallIntResult OsSysCallsImpl::getifaddrs(struct ifaddrs** ifap) { + const int rc = ::getifaddrs(ifap); + return {rc, rc != -1 ? 0 : errno}; +} + +void OsSysCallsImpl::freeifaddrs(struct ifaddrs* ifp) { ::freeifaddrs(ifp); } + } // namespace Api } // namespace Envoy diff --git a/source/common/api/posix/os_sys_calls_impl.h b/source/common/api/posix/os_sys_calls_impl.h index 77ccb0da2b083..d5c1325ecb846 100644 --- a/source/common/api/posix/os_sys_calls_impl.h +++ b/source/common/api/posix/os_sys_calls_impl.h @@ -49,6 +49,9 @@ class OsSysCallsImpl : public OsSysCalls { SysCallSocketResult duplicate(os_fd_t oldfd) override; SysCallSocketResult accept(os_fd_t socket, sockaddr* addr, socklen_t* addrlen) override; SysCallBoolResult socketTcpInfo(os_fd_t sockfd, EnvoyTcpInfo* tcp_info) override; + bool supportsGetifaddrs() const override; + SysCallIntResult getifaddrs(struct ifaddrs** ifap) override; + void freeifaddrs(struct ifaddrs* ifp) override; }; using OsSysCallsSingleton = ThreadSafeSingleton; diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index 3766c54a2100c..d6e0c4334e3b9 100644 --- a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -404,5 +404,9 @@ SysCallBoolResult OsSysCallsImpl::socketTcpInfo([[maybe_unused]] os_fd_t sockfd, return {false, WSAEOPNOTSUPP}; } +SysCallIntResult OsSysCallsImpl::getifaddrs(struct ifaddrs**) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + +void OsSysCallsImpl::freeifaddrs(struct ifaddrs*) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + } // namespace Api } // namespace 
Envoy diff --git a/source/common/api/win32/os_sys_calls_impl.h b/source/common/api/win32/os_sys_calls_impl.h index 4dc62e0770d2b..2460f963b222e 100644 --- a/source/common/api/win32/os_sys_calls_impl.h +++ b/source/common/api/win32/os_sys_calls_impl.h @@ -51,6 +51,9 @@ class OsSysCallsImpl : public OsSysCalls { SysCallSocketResult duplicate(os_fd_t oldfd) override; SysCallSocketResult accept(os_fd_t socket, sockaddr* addr, socklen_t* addrlen) override; SysCallBoolResult socketTcpInfo(os_fd_t sockfd, EnvoyTcpInfo* tcp_info) override; + bool supportsGetifaddrs() const override { return false; } + SysCallIntResult getifaddrs(struct ifaddrs** ifap) override; + void freeifaddrs(struct ifaddrs* ifp) override; }; using OsSysCallsSingleton = ThreadSafeSingleton; diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 3ca8dfa105c9d..ecad95d9be737 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -83,6 +83,17 @@ envoy_cc_library( hdrs = ["compiler_requirements.h"], ) +envoy_cc_library( + name = "dns_utils_lib", + srcs = ["dns_utils.cc"], + hdrs = ["dns_utils.h"], + deps = [ + ":assert_lib", + "//envoy/network:dns_interface", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "documentation_url_lib", hdrs = ["documentation_url.h"], @@ -372,8 +383,8 @@ envoy_cc_library( hdrs = ["thread.h"], external_deps = ["abseil_synchronization"], deps = envoy_cc_platform_dep("thread_impl_lib") + [ + ":macros", ":non_copyable", - "//source/common/singleton:threadsafe_singleton", ], ) diff --git a/source/common/common/dns_utils.cc b/source/common/common/dns_utils.cc new file mode 100644 index 0000000000000..0d735d634a22d --- /dev/null +++ b/source/common/common/dns_utils.cc @@ -0,0 +1,30 @@ +#include "source/common/common/dns_utils.h" + +#include "source/common/common/assert.h" + +namespace Envoy { +namespace DnsUtils { + +Network::DnsLookupFamily +getDnsLookupFamilyFromCluster(const 
envoy::config::cluster::v3::Cluster& cluster) { + return getDnsLookupFamilyFromEnum(cluster.dns_lookup_family()); +} + +Network::DnsLookupFamily +getDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily family) { + switch (family) { + case envoy::config::cluster::v3::Cluster::V6_ONLY: + return Network::DnsLookupFamily::V6Only; + case envoy::config::cluster::v3::Cluster::V4_ONLY: + return Network::DnsLookupFamily::V4Only; + case envoy::config::cluster::v3::Cluster::AUTO: + return Network::DnsLookupFamily::Auto; + case envoy::config::cluster::v3::Cluster::V4_PREFERRED: + return Network::DnsLookupFamily::V4Preferred; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +} // namespace DnsUtils +} // namespace Envoy diff --git a/source/common/common/dns_utils.h b/source/common/common/dns_utils.h new file mode 100644 index 0000000000000..58cc6bac53521 --- /dev/null +++ b/source/common/common/dns_utils.h @@ -0,0 +1,18 @@ +#pragma once + +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/network/dns.h" + +namespace Envoy { +namespace DnsUtils { + +/** + * Utility function to get Dns from cluster/enum. + */ +Network::DnsLookupFamily +getDnsLookupFamilyFromCluster(const envoy::config::cluster::v3::Cluster& cluster); +Network::DnsLookupFamily +getDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily family); + +} // namespace DnsUtils +} // namespace Envoy diff --git a/source/common/common/key_value_store_base.h b/source/common/common/key_value_store_base.h index c445e9f47bdde..518cc9cdb3d00 100644 --- a/source/common/common/key_value_store_base.h +++ b/source/common/common/key_value_store_base.h @@ -8,7 +8,6 @@ #include "absl/container/flat_hash_map.h" -// TODO(alyssawilk) move to a common extension dir. namespace Envoy { // This is the base implementation of the KeyValueStore. 
It handles the various diff --git a/source/common/common/logger.h b/source/common/common/logger.h index fdae426b8acfa..284ca290df5d6 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -28,6 +28,7 @@ namespace Logger { // TODO: find out a way for extensions to register new logger IDs #define ALL_LOGGER_IDS(FUNCTION) \ FUNCTION(admin) \ + FUNCTION(alternate_protocols_cache) \ FUNCTION(aws) \ FUNCTION(assert) \ FUNCTION(backtrace) \ @@ -332,9 +333,10 @@ template class Loggable { protected: /** * Do not use this directly, use macros defined below. + * See source/docs/logging.md for more details. * @return spdlog::logger& the static log instance to use for class local logging. */ - static spdlog::logger& __log_do_not_use_read_comment() { + static spdlog::logger& __log_do_not_use_read_comment() { // NOLINT(readability-identifier-naming) static spdlog::logger& instance = Registry::getLog(id); return instance; } diff --git a/source/common/common/thread.cc b/source/common/common/thread.cc index 282858399000b..63cac85f6ca0e 100644 --- a/source/common/common/thread.cc +++ b/source/common/common/thread.cc @@ -1,46 +1,120 @@ #include "source/common/common/thread.h" +#include + +#include "source/common/common/assert.h" +#include "source/common/common/macros.h" + namespace Envoy { namespace Thread { -bool MainThread::isMainThread() { - // If threading is off, only main thread is running. - auto main_thread_singleton = MainThreadSingleton::getExisting(); - if (main_thread_singleton == nullptr) { - return true; +namespace { + +// Singleton structure capturing which thread is the main dispatcher thread, and +// which is the test thread. This info is used for assertions around catching +// exceptions and accessing data structures which are not mutex-protected, and +// are expected only from the main thread. +// +// TODO(jmarantz): avoid the singleton and instead have this object owned +// by the ThreadFactory. 
That will require plumbing the API::API into all +// call-sites for isMainThread(), which might be a bit of work, but will make +// tests more hermetic. +struct ThreadIds { + // Determines whether we are currently running on the main-thread or + // test-thread. We need to allow for either one because we don't establish + // the full threading model in all unit tests. + bool inMainOrTestThread() const { + // We don't take the lock when testing the thread IDs, as they are atomic, + // and are cleared when being released. All possible thread orderings + // result in the correct result even without a lock. + std::thread::id id = std::this_thread::get_id(); + return main_thread_id_ == id || test_thread_id_ == id; + } + + bool isMainThreadActive() const { + absl::MutexLock lock(&mutex_); + return main_thread_use_count_ != 0; } - // When threading is on, compare thread id with main thread id. - return main_thread_singleton->inMainThread() || main_thread_singleton->inTestThread(); -} - -bool MainThread::isWorkerThread() { - auto main_thread_singleton = MainThreadSingleton::getExisting(); - // Allow worker thread code to be executed in test thread. - if (main_thread_singleton == nullptr) { - return true; + + // Returns a singleton instance of this. The instance is never freed. + static ThreadIds& get() { MUTABLE_CONSTRUCT_ON_FIRST_USE(ThreadIds); } + + // Call this when the MainThread exits. Nested semantics are supported, so + // that if multiple MainThread instances are declared, we unwind them + // properly. + void releaseMainThread() { + absl::MutexLock lock(&mutex_); + ASSERT(main_thread_use_count_ > 0); + ASSERT(std::this_thread::get_id() == main_thread_id_); + if (--main_thread_use_count_ == 0) { + // Clearing the thread ID when its use-count goes to zero allows us + // to read the atomic without taking a lock. + main_thread_id_ = std::thread::id{}; + } } - // When threading is on, compare thread id with main thread id. 
- return !main_thread_singleton->inMainThread(); -} - -void MainThread::clear() { - delete MainThreadSingleton::getExisting(); - MainThreadSingleton::clear(); -} - -void MainThread::initTestThread() { - if (!initialized()) { - MainThreadSingleton::initialize(new MainThread()); + + // Call this when the TestThread exits. Nested semantics are supported, so + // that if multiple TestThread instances are declared, we unwind them + // properly. + void releaseTestThread() { + absl::MutexLock lock(&mutex_); + ASSERT(test_thread_use_count_ > 0); + ASSERT(std::this_thread::get_id() == test_thread_id_); + if (--test_thread_use_count_ == 0) { + // Clearing the thread ID when its use-count goes to zero allows us + // to read the atomic without taking a lock. + test_thread_id_ = std::thread::id{}; + } + } + + // Declares current thread as the main one, or verifies that the current + // thread matches any previous declarations. + void registerMainThread() { + absl::MutexLock lock(&mutex_); + if (++main_thread_use_count_ > 1) { + ASSERT(std::this_thread::get_id() == main_thread_id_); + } else { + main_thread_id_ = std::this_thread::get_id(); + } } - MainThreadSingleton::get().registerTestThread(); -} -void MainThread::initMainThread() { - if (!initialized()) { - MainThreadSingleton::initialize(new MainThread()); + // Declares current thread as the test thread, or verifies that the current + // thread matches any previous declarations. + void registerTestThread() { + absl::MutexLock lock(&mutex_); + if (++test_thread_use_count_ > 1) { + ASSERT(std::this_thread::get_id() == test_thread_id_); + } else { + test_thread_id_ = std::this_thread::get_id(); + } } - MainThreadSingleton::get().registerMainThread(); -} + +private: + // The atomic thread IDs can be read without a mutex, but they are written + // under a mutex so that they are consistent with their use_counts. this + // avoids the possibility of two threads racing to claim being the main/test + // thread. 
+ std::atomic main_thread_id_; + std::atomic test_thread_id_; + + int32_t main_thread_use_count_ GUARDED_BY(mutex_) = 0; + int32_t test_thread_use_count_ GUARDED_BY(mutex_) = 0; + mutable absl::Mutex mutex_; +}; + +} // namespace + +bool MainThread::isMainOrTestThread() { return ThreadIds::get().inMainOrTestThread(); } + +bool MainThread::isMainThreadActive() { return ThreadIds::get().isMainThreadActive(); } + +TestThread::TestThread() { ThreadIds::get().registerTestThread(); } + +TestThread::~TestThread() { ThreadIds::get().releaseTestThread(); } + +MainThread::MainThread() { ThreadIds::get().registerMainThread(); } + +MainThread::~MainThread() { ThreadIds::get().releaseMainThread(); } } // namespace Thread } // namespace Envoy diff --git a/source/common/common/thread.h b/source/common/common/thread.h index 347df89c9fab1..f1f415cf04d50 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -8,7 +8,6 @@ #include "envoy/thread/thread.h" #include "source/common/common/non_copyable.h" -#include "source/common/singleton/threadsafe_singleton.h" #include "absl/synchronization/mutex.h" @@ -169,36 +168,43 @@ class AtomicPtr : private AtomicPtrArray { T* get(const MakeObject& make_object) { return BaseClass::get(0, make_object); } }; -struct MainThread { - using MainThreadSingleton = InjectableSingleton; - bool inMainThread() const { return main_thread_id_ == std::this_thread::get_id(); } - bool inTestThread() const { - return test_thread_id_.has_value() && (test_thread_id_.value() == std::this_thread::get_id()); - } - void registerTestThread() { test_thread_id_ = std::this_thread::get_id(); } - void registerMainThread() { main_thread_id_ = std::this_thread::get_id(); } - static bool initialized() { return MainThreadSingleton::getExisting() != nullptr; } - /* - * Register the main thread id, should be called in main thread before threading is on. Currently - * called in ThreadLocal::InstanceImpl(). 
- */ - static void initMainThread(); - /* - * Register the test thread id, should be called in test thread before threading is on. Allow - * some main thread only code to be executed on test thread. - */ - static void initTestThread(); - /* - * Delete the main thread singleton, should be called in main thread after threading - * has been shut down. Currently called in ~ThreadLocal::InstanceImpl(). +// RAII object to declare the TestThread. This should be declared in main() or +// equivalent for any test binaries. +// +// Generally we expect TestThread to be instantiated only once on main() for +// each test binary, though nested instantiations are allowed as long as the +// thread ID does not change. +class TestThread { +public: + TestThread(); + ~TestThread(); +}; + +// RAII object to declare the MainThread. This should be declared in the thread +// function or equivalent. +// +// Generally we expect MainThread to be instantiated only once or twice. It has +// to be instantiated prior to OptionsImpl being created, so it needs to be in +// instantiated from main_common(). In addition, it is instantiated by +// ThreadLocal implementation to get the correct behavior for tests that do not +// instantiate main. +// +// In general, nested instantiations are allowed as long as the thread ID does +// not change. +class MainThread { +public: + MainThread(); + ~MainThread(); + + /** + * @return whether the current thread is the main thread or test thread. */ - static void clear(); - static bool isMainThread(); - static bool isWorkerThread(); + static bool isMainOrTestThread(); -private: - std::thread::id main_thread_id_; - absl::optional test_thread_id_; + /** + * @return whether a MainThread has been instantiated. + */ + static bool isMainThreadActive(); }; // To improve exception safety in data plane, we plan to forbid the use of raw try in the core code @@ -206,7 +212,7 @@ struct MainThread { // worker thread. 
#define TRY_ASSERT_MAIN_THREAD \ try { \ - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); #define END_TRY } diff --git a/source/common/config/context_provider_impl.h b/source/common/config/context_provider_impl.h index 3123b2066cef4..590b9c4eff2b8 100644 --- a/source/common/config/context_provider_impl.h +++ b/source/common/config/context_provider_impl.h @@ -20,7 +20,7 @@ class ContextProviderImpl : public ContextProvider { const xds::core::v3::ContextParams& nodeContext() const override { return node_context_; } const xds::core::v3::ContextParams& dynamicContext(absl::string_view resource_type_url) const override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); auto it = dynamic_context_.find(resource_type_url); if (it != dynamic_context_.end()) { return it->second; @@ -29,7 +29,7 @@ class ContextProviderImpl : public ContextProvider { }; void setDynamicContextParam(absl::string_view resource_type_url, absl::string_view key, absl::string_view value) override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); (*dynamic_context_[resource_type_url] .mutable_params())[toStdStringView(key)] = // NOLINT(std::string_view) toStdStringView(value); // NOLINT(std::string_view) @@ -37,14 +37,14 @@ class ContextProviderImpl : public ContextProvider { } void unsetDynamicContextParam(absl::string_view resource_type_url, absl::string_view key) override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); dynamic_context_[resource_type_url].mutable_params()->erase( toStdStringView(key)); // NOLINT(std::string_view) update_cb_helper_.runCallbacks(resource_type_url); } ABSL_MUST_USE_RESULT Common::CallbackHandlePtr addDynamicContextUpdateCallback(UpdateNotificationCb callback) const override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); 
return update_cb_helper_.add(callback); }; diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index b1b162d3f3274..26e9d1d696232 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -124,7 +124,11 @@ void DeltaSubscriptionState::handleGoodResponse( { const auto scoped_update = ttl_.scopedTtlUpdate(); for (const auto& resource : message.resources()) { - addResourceState(resource); + if (wildcard_ || resource_state_.contains(resource.name())) { + // Only consider tracked resources. + // NOTE: This is not gonna work for xdstp resources with glob resource matching. + addResourceState(resource); + } } } diff --git a/source/common/config/protobuf_link_hacks.h b/source/common/config/protobuf_link_hacks.h index 991291789ef9a..c5eff8e5fb8dd 100644 --- a/source/common/config/protobuf_link_hacks.h +++ b/source/common/config/protobuf_link_hacks.h @@ -3,6 +3,7 @@ #include "envoy/service/cluster/v3/cds.pb.h" #include "envoy/service/discovery/v3/ads.pb.h" #include "envoy/service/endpoint/v3/eds.pb.h" +#include "envoy/service/endpoint/v3/leds.pb.h" #include "envoy/service/extension/v3/config_discovery.pb.h" #include "envoy/service/health/v3/hds.pb.h" #include "envoy/service/listener/v3/lds.pb.h" @@ -24,6 +25,7 @@ const envoy::service::listener::v3::LdsDummy _lds_dummy_v3; const envoy::service::route::v3::RdsDummy _rds_dummy_v3; const envoy::service::cluster::v3::CdsDummy _cds_dummy_v3; const envoy::service::endpoint::v3::EdsDummy _eds_dummy_v3; +const envoy::service::endpoint::v3::LedsDummy _leds_dummy_v3; const envoy::service::route::v3::SrdsDummy _srds_dummy_v3; const envoy::service::extension::v3::EcdsDummy _ecds_dummy_v3; const envoy::service::runtime::v3::RtdsDummy _rtds_dummy_v3; diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 74f5fb439e3e1..500556158f304 100644 --- 
a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -42,9 +42,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), api_config_source); Utility::checkTransportVersion(api_config_source); - const auto transport_api_version = envoy::config::core::v3::ApiVersion::V3; switch (api_config_source.api_type()) { - case envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY: + case envoy::config::core::v3::ApiConfigSource::DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE: throw EnvoyException( "REST_LEGACY no longer a supported ApiConfigSource. " "Please specify an explicit supported api_type in the following config:\n" + @@ -53,9 +52,9 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( return std::make_unique( local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, api_.randomGenerator(), Utility::apiConfigSourceRefreshDelay(api_config_source), - Utility::apiConfigSourceRequestTimeout(api_config_source), - restMethod(type_url, transport_api_version), type_url, callbacks, resource_decoder, stats, - Utility::configSourceInitialFetchTimeout(config), validation_visitor_); + Utility::apiConfigSourceRequestTimeout(api_config_source), restMethod(type_url), type_url, + callbacks, resource_decoder, stats, Utility::configSourceInitialFetchTimeout(config), + validation_visitor_); case envoy::config::core::v3::ApiConfigSource::GRPC: return std::make_unique( std::make_shared( @@ -63,8 +62,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->createUncachedRawAsyncClient(), - dispatcher_, sotwGrpcMethod(type_url, transport_api_version), api_.randomGenerator(), - scope, Utility::parseRateLimitSettings(api_config_source), + dispatcher_, 
sotwGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), api_config_source.set_node_on_first_message_only()), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), @@ -75,8 +74,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->createUncachedRawAsyncClient(), - dispatcher_, deltaGrpcMethod(type_url, transport_api_version), api_.randomGenerator(), - scope, Utility::parseRateLimitSettings(api_config_source), local_info_), + dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), local_info_), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), /*is_aggregated*/ false, options); } @@ -116,37 +115,55 @@ SubscriptionPtr SubscriptionFactoryImpl::collectionSubscriptionFromUrl( fmt::format("xdstp:// type does not match {} in {}", resource_type, Config::XdsResourceIdentifier::encodeUrl(collection_locator))); } - const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source(); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), - api_config_source); + switch (config.config_source_specifier_case()) { + case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kApiConfigSource: { + const envoy::config::core::v3::ApiConfigSource& api_config_source = + config.api_config_source(); + Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), + api_config_source); - SubscriptionOptions options; - // All Envoy collections currently are xDS resource graph roots and require node context - // parameters. 
- options.add_xdstp_node_context_params_ = true; - switch (api_config_source.api_type()) { - case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { - const std::string type_url = TypeUtil::descriptorFullNameToTypeUrl(resource_type); - return std::make_unique( - collection_locator, - std::make_shared( - Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), - api_config_source, scope, true) - ->createUncachedRawAsyncClient(), - dispatcher_, deltaGrpcMethod(type_url, envoy::config::core::v3::ApiVersion::V3), - api_.randomGenerator(), scope, Utility::parseRateLimitSettings(api_config_source), - local_info_), - callbacks, resource_decoder, stats, dispatcher_, - Utility::configSourceInitialFetchTimeout(config), false, options); + SubscriptionOptions options; + // All Envoy collections currently are xDS resource graph roots and require node context + // parameters. + options.add_xdstp_node_context_params_ = true; + switch (api_config_source.api_type()) { + case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { + const std::string type_url = TypeUtil::descriptorFullNameToTypeUrl(resource_type); + return std::make_unique( + collection_locator, + std::make_shared( + Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), + api_config_source, scope, true) + ->createUncachedRawAsyncClient(), + dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), local_info_), + callbacks, resource_decoder, stats, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), false, options); + } + case envoy::config::core::v3::ApiConfigSource::AGGREGATED_DELTA_GRPC: { + return std::make_unique( + collection_locator, cm_.adsMux(), callbacks, resource_decoder, stats, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), false, options); + } + default: + throw EnvoyException(fmt::format("Unknown xdstp:// transport API type in {}", + 
api_config_source.DebugString())); + } } - case envoy::config::core::v3::ApiConfigSource::AGGREGATED_DELTA_GRPC: { + case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kAds: { + // TODO(adisuissa): verify that the ADS is set up in delta-xDS mode. + SubscriptionOptions options; + // All Envoy collections currently are xDS resource graph roots and require node context + // parameters. + options.add_xdstp_node_context_params_ = true; return std::make_unique( collection_locator, cm_.adsMux(), callbacks, resource_decoder, stats, dispatcher_, - Utility::configSourceInitialFetchTimeout(config), false, options); + Utility::configSourceInitialFetchTimeout(config), true, options); } default: - throw EnvoyException(fmt::format("Unknown xdstp:// transport API type in {}", - api_config_source.DebugString())); + throw EnvoyException("Missing or not supported config source specifier in " + "envoy::config::core::v3::ConfigSource for a collection. Only ADS and " + "gRPC in delta-xDS mode are supported."); } } default: diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index 1add1d4661ef0..4ee233d118ae0 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -55,6 +55,7 @@ TypeUrlToV3ServiceMap* buildTypeUrlToServiceMap() { "envoy.service.secret.v3.SecretDiscoveryService", "envoy.service.cluster.v3.ClusterDiscoveryService", "envoy.service.endpoint.v3.EndpointDiscoveryService", + "envoy.service.endpoint.v3.LocalityEndpointDiscoveryService", "envoy.service.listener.v3.ListenerDiscoveryService", "envoy.service.runtime.v3.RuntimeDiscoveryService", "envoy.service.extension.v3.ExtensionConfigDiscoveryService", @@ -92,26 +93,19 @@ TypeUrlToV3ServiceMap& typeUrlToV3ServiceMap() { } // namespace -// TODO(alyssawilk) clean up transport_api_version argument. 
-const Protobuf::MethodDescriptor& -deltaGrpcMethod(absl::string_view type_url, - envoy::config::core::v3::ApiVersion /*transport_api_version*/) { +const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view type_url) { const auto it = typeUrlToV3ServiceMap().find(static_cast(type_url)); ASSERT(it != typeUrlToV3ServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.delta_grpc_); } -const Protobuf::MethodDescriptor& -sotwGrpcMethod(absl::string_view type_url, - envoy::config::core::v3::ApiVersion /*transport_api_version*/) { +const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view type_url) { const auto it = typeUrlToV3ServiceMap().find(static_cast(type_url)); ASSERT(it != typeUrlToV3ServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.sotw_grpc_); } -const Protobuf::MethodDescriptor& -restMethod(absl::string_view type_url, - envoy::config::core::v3::ApiVersion /*transport_api_version*/) { +const Protobuf::MethodDescriptor& restMethod(absl::string_view type_url) { const auto it = typeUrlToV3ServiceMap().find(static_cast(type_url)); ASSERT(it != typeUrlToV3ServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.rest_); diff --git a/source/common/config/type_to_endpoint.h b/source/common/config/type_to_endpoint.h index 7c9f5a5007374..b600bf4e7d929 100644 --- a/source/common/config/type_to_endpoint.h +++ b/source/common/config/type_to_endpoint.h @@ -10,18 +10,12 @@ namespace Envoy { namespace Config { // Translates an xDS resource type_url to the name of the delta gRPC service that carries it. 
-const Protobuf::MethodDescriptor& -deltaGrpcMethod(absl::string_view resource_type_url, - envoy::config::core::v3::ApiVersion transport_api_version); +const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view resource_type_url); // Translates an xDS resource type_url to the name of the state-of-the-world gRPC service that // carries it. -const Protobuf::MethodDescriptor& -sotwGrpcMethod(absl::string_view resource_type_url, - envoy::config::core::v3::ApiVersion transport_api_version); +const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view resource_type_url); // Translates an xDS resource type_url to the name of the REST service that carries it. -const Protobuf::MethodDescriptor& -restMethod(absl::string_view resource_type_url, - envoy::config::core::v3::ApiVersion transport_api_version); +const Protobuf::MethodDescriptor& restMethod(absl::string_view resource_type_url); } // namespace Config } // namespace Envoy diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 1d0603a1e45d2..97523eca4be38 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -191,7 +191,7 @@ class Utility { */ template static void checkTransportVersion(const Proto& api_config_source) { const auto transport_api_version = api_config_source.transport_api_version(); - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO || transport_api_version == envoy::config::core::v3::ApiVersion::V2) { Runtime::LoaderSingleton::getExisting()->countDeprecatedFeatureUse(); @@ -300,7 +300,7 @@ class Utility { * Get a Factory from the registry with error checking to ensure the name and the factory are * valid. And a flag to control return nullptr or throw an exception. * @param message proto that contains fields 'name' and 'typed_config'. 
- * @param is_optional an exception will be throw when the value is true and no factory found. + * @param is_optional an exception will be throw when the value is false and no factory found. * @return factory the factory requested or nullptr if it does not exist. */ template diff --git a/source/common/config/xds_mux/delta_subscription_state.cc b/source/common/config/xds_mux/delta_subscription_state.cc index dd3b8e686cb73..327171480048d 100644 --- a/source/common/config/xds_mux/delta_subscription_state.cc +++ b/source/common/config/xds_mux/delta_subscription_state.cc @@ -102,7 +102,11 @@ void DeltaSubscriptionState::handleGoodResponse( { const auto scoped_update = ttl_.scopedTtlUpdate(); for (const auto& resource : message.resources()) { - addResourceState(resource); + if (wildcard_ || resource_state_.contains(resource.name())) { + // Only consider tracked resources. + // NOTE: This is not gonna work for xdstp resources with glob resource matching. + addResourceState(resource); + } } } diff --git a/source/common/crypto/BUILD b/source/common/crypto/BUILD index 07b461c6ab0b7..0b6e29babbd23 100644 --- a/source/common/crypto/BUILD +++ b/source/common/crypto/BUILD @@ -10,8 +10,17 @@ envoy_package() envoy_cc_library( name = "utility_lib", + srcs = [ + "crypto_impl.cc", + "utility_impl.cc", + ], hdrs = [ + "crypto_impl.h", "utility.h", + "utility_impl.h", + ], + external_deps = [ + "ssl", ], deps = [ "//envoy/buffer:buffer_interface", diff --git a/source/extensions/common/crypto/crypto_impl.cc b/source/common/crypto/crypto_impl.cc similarity index 82% rename from source/extensions/common/crypto/crypto_impl.cc rename to source/common/crypto/crypto_impl.cc index e26682cbf6483..e560c70892dc5 100644 --- a/source/extensions/common/crypto/crypto_impl.cc +++ b/source/common/crypto/crypto_impl.cc @@ -1,4 +1,4 @@ -#include "source/extensions/common/crypto/crypto_impl.h" +#include "source/common/crypto/crypto_impl.h" namespace Envoy { namespace Common { diff --git 
a/source/extensions/common/crypto/crypto_impl.h b/source/common/crypto/crypto_impl.h similarity index 100% rename from source/extensions/common/crypto/crypto_impl.h rename to source/common/crypto/crypto_impl.h diff --git a/source/extensions/common/crypto/utility_impl.cc b/source/common/crypto/utility_impl.cc similarity index 96% rename from source/extensions/common/crypto/utility_impl.cc rename to source/common/crypto/utility_impl.cc index 7319bfca3676a..684347236c144 100644 --- a/source/extensions/common/crypto/utility_impl.cc +++ b/source/common/crypto/utility_impl.cc @@ -1,7 +1,7 @@ -#include "source/extensions/common/crypto/utility_impl.h" +#include "source/common/crypto/utility_impl.h" #include "source/common/common/assert.h" -#include "source/extensions/common/crypto/crypto_impl.h" +#include "source/common/crypto/crypto_impl.h" #include "absl/container/fixed_array.h" #include "absl/strings/ascii.h" diff --git a/source/extensions/common/crypto/utility_impl.h b/source/common/crypto/utility_impl.h similarity index 100% rename from source/extensions/common/crypto/utility_impl.h rename to source/common/crypto/utility_impl.h diff --git a/source/common/grpc/async_client_manager_impl.cc b/source/common/grpc/async_client_manager_impl.cc index 92c368ebb6f41..ff712fca14bf0 100644 --- a/source/common/grpc/async_client_manager_impl.cc +++ b/source/common/grpc/async_client_manager_impl.cc @@ -45,16 +45,7 @@ AsyncClientFactoryImpl::AsyncClientFactoryImpl(Upstream::ClusterManager& cm, if (skip_cluster_check) { return; } - - const std::string& cluster_name = config.envoy_grpc().cluster_name(); - auto all_clusters = cm_.clusters(); - const auto& it = all_clusters.active_clusters_.find(cluster_name); - if (it == all_clusters.active_clusters_.end()) { - throw EnvoyException(fmt::format("Unknown gRPC client cluster '{}'", cluster_name)); - } - if (it->second.get().info()->addedViaApi()) { - throw EnvoyException(fmt::format("gRPC client cluster '{}' is not static", cluster_name)); 
- } + cm_.checkActiveStaticCluster(config.envoy_grpc().cluster_name()); } AsyncClientManagerImpl::AsyncClientManagerImpl(Upstream::ClusterManager& cm, diff --git a/source/common/http/BUILD b/source/common/http/BUILD index ebc5bdcf0cf67..38eef19b2dcb1 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -169,6 +169,7 @@ envoy_cc_library( "alternate_protocols_cache_impl.h", "alternate_protocols_cache_manager_impl.h", ], + external_deps = ["quiche_quic_platform"], deps = [ "//envoy/common:time_interface", "//envoy/event:dispatcher_interface", @@ -176,7 +177,11 @@ envoy_cc_library( "//envoy/singleton:manager_interface", "//envoy/thread_local:thread_local_interface", "//envoy/upstream:resource_manager_interface", + "//source/common/common:key_value_store_lib", "//source/common/common:logger_lib", + "//source/common/config:utility_lib", + "@com_github_google_quiche//:spdy_core_alt_svc_wire_format_lib", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -454,6 +459,7 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) diff --git a/source/common/http/alternate_protocols_cache_impl.cc b/source/common/http/alternate_protocols_cache_impl.cc index f4a176fb405df..c1c2d4bf18925 100644 --- a/source/common/http/alternate_protocols_cache_impl.cc +++ b/source/common/http/alternate_protocols_cache_impl.cc @@ -2,23 +2,136 @@ #include "source/common/common/logger.h" +#include "quiche/spdy/core/spdy_alt_svc_wire_format.h" +#include "re2/re2.h" + namespace Envoy { namespace Http { +namespace { + +struct RegexHolder { + RegexHolder() : origin_regex("(.*)://(.*):(\\d+)") {} + + const re2::RE2 origin_regex; +}; + +using ConstRegexHolder = ConstSingleton; + +} // namespace + +std::string +AlternateProtocolsCacheImpl::originToString(const 
AlternateProtocolsCache::Origin& origin) { + return absl::StrCat(origin.scheme_, "://", origin.hostname_, ":", origin.port_); +} + +absl::optional +AlternateProtocolsCacheImpl::stringToOrigin(const std::string& str) { + const re2::RE2& origin_regex = ConstRegexHolder::get().origin_regex; + std::string scheme; + std::string hostname; + int port = 0; + if (re2::RE2::FullMatch(str.c_str(), origin_regex, &scheme, &hostname, &port)) { + return AlternateProtocolsCache::Origin(scheme, hostname, port); + } + return {}; +} + +std::string AlternateProtocolsCacheImpl::protocolsToStringForCache( + const std::vector& protocols, TimeSource& /*time_source*/) { + if (protocols.empty()) { + return std::string("clear"); + } + std::string value; + for (auto& protocol : protocols) { + if (!value.empty()) { + value.push_back(','); + } + absl::StrAppend(&value, protocol.alpn_, "=\"", protocol.hostname_, ":", protocol.port_, "\""); + + // Note this is _not_ actually the max age, but the absolute time at which + // this entry will expire. protocolsFromString will convert back to ma. 
+ absl::StrAppend( + &value, "; ma=", + std::chrono::duration_cast(protocol.expiration_.time_since_epoch()) + .count()); + } + return value; +} + +absl::optional> +AlternateProtocolsCacheImpl::protocolsFromString(absl::string_view alt_svc_string, + TimeSource& time_source, bool from_cache) { + std::vector protocols; + spdy::SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector; + if (!spdy::SpdyAltSvcWireFormat::ParseHeaderFieldValue(alt_svc_string, &altsvc_vector)) { + return {}; + } + for (const auto& alt_svc : altsvc_vector) { + MonotonicTime expiration; + if (from_cache) { + auto expire_time_from_epoch = std::chrono::seconds(alt_svc.max_age_seconds); + auto time_since_epoch = std::chrono::duration_cast( + time_source.monotonicTime().time_since_epoch()); + if (expire_time_from_epoch < time_since_epoch) { + expiration = time_source.monotonicTime(); + } else { + expiration = time_source.monotonicTime() + (expire_time_from_epoch - time_since_epoch); + } + } else { + expiration = time_source.monotonicTime() + std::chrono::seconds(alt_svc.max_age_seconds); + } + Http::AlternateProtocolsCache::AlternateProtocol protocol(alt_svc.protocol_id, alt_svc.host, + alt_svc.port, expiration); + protocols.push_back(protocol); + } + return protocols; +} -AlternateProtocolsCacheImpl::AlternateProtocolsCacheImpl(TimeSource& time_source) - : time_source_(time_source) {} +AlternateProtocolsCacheImpl::AlternateProtocolsCacheImpl( + TimeSource& time_source, std::unique_ptr&& key_value_store, size_t max_entries) + : time_source_(time_source), key_value_store_(std::move(key_value_store)), + max_entries_(max_entries > 0 ? 
max_entries : 1024) { + if (key_value_store_) { + KeyValueStore::ConstIterateCb load = [this](const std::string& key, const std::string& value) { + absl::optional> protocols = + protocolsFromString(value, time_source_, true); + absl::optional origin = stringToOrigin(key); + if (protocols.has_value() && origin.has_value()) { + setAlternativesImpl(origin.value(), protocols.value()); + } else { + ENVOY_LOG(warn, + fmt::format("Unable to parse cache entry with key: {} value: {}", key, value)); + } + return KeyValueStore::Iterate::Continue; + }; + key_value_store_->iterate(load); + } +} AlternateProtocolsCacheImpl::~AlternateProtocolsCacheImpl() = default; void AlternateProtocolsCacheImpl::setAlternatives(const Origin& origin, - const std::vector& protocols) { - protocols_[origin] = protocols; + std::vector& protocols) { + setAlternativesImpl(origin, protocols); + if (key_value_store_) { + key_value_store_->addOrUpdate(originToString(origin), + protocolsToStringForCache(protocols, time_source_)); + } +} + +void AlternateProtocolsCacheImpl::setAlternativesImpl(const Origin& origin, + std::vector& protocols) { static const size_t max_protocols = 10; if (protocols.size() > max_protocols) { ENVOY_LOG_MISC(trace, "Too many alternate protocols: {}, truncating", protocols.size()); - std::vector& p = protocols_[origin]; - p.erase(p.begin() + max_protocols, p.end()); + protocols.erase(protocols.begin() + max_protocols, protocols.end()); + } + while (protocols_.size() >= max_entries_) { + auto iter = protocols_.begin(); + key_value_store_->remove(originToString(iter->first)); + protocols_.erase(iter); } + protocols_[origin] = protocols; } OptRef> @@ -27,9 +140,9 @@ AlternateProtocolsCacheImpl::findAlternatives(const Origin& origin) { if (entry_it == protocols_.end()) { return makeOptRefFromPtr>(nullptr); } - std::vector& protocols = entry_it->second; + auto original_size = protocols.size(); const MonotonicTime now = time_source_.monotonicTime(); 
protocols.erase(std::remove_if(protocols.begin(), protocols.end(), [now](const AlternateProtocol& protocol) { @@ -39,8 +152,15 @@ AlternateProtocolsCacheImpl::findAlternatives(const Origin& origin) { if (protocols.empty()) { protocols_.erase(entry_it); + if (key_value_store_) { + key_value_store_->remove(originToString(origin)); + } return makeOptRefFromPtr>(nullptr); } + if (key_value_store_ && original_size != protocols.size()) { + key_value_store_->addOrUpdate(originToString(origin), + protocolsToStringForCache(protocols, time_source_)); + } return makeOptRef(const_cast&>(protocols)); } diff --git a/source/common/http/alternate_protocols_cache_impl.h b/source/common/http/alternate_protocols_cache_impl.h index a029a970c763f..108cbcdf761dd 100644 --- a/source/common/http/alternate_protocols_cache_impl.h +++ b/source/common/http/alternate_protocols_cache_impl.h @@ -5,35 +5,77 @@ #include #include +#include "envoy/common/key_value_store.h" #include "envoy/common/optref.h" #include "envoy/common/time.h" #include "envoy/http/alternate_protocols_cache.h" +#include "source/common/common/logger.h" + #include "absl/strings/string_view.h" +#include "quiche/common/quiche_linked_hash_map.h" namespace Envoy { namespace Http { // An implementation of AlternateProtocolsCache. // See: source/docs/http3_upstream.md -class AlternateProtocolsCacheImpl : public AlternateProtocolsCache { +class AlternateProtocolsCacheImpl : public AlternateProtocolsCache, + Logger::Loggable { public: - explicit AlternateProtocolsCacheImpl(TimeSource& time_source); + AlternateProtocolsCacheImpl(TimeSource& time_source, std::unique_ptr&& store, + size_t max_entries); ~AlternateProtocolsCacheImpl() override; + // Converts an Origin to a string which can be parsed by stringToOrigin. + static std::string originToString(const AlternateProtocolsCache::Origin& origin); + // Converts a string from originToString back to structured format. 
+ static absl::optional stringToOrigin(const std::string& str); + + // Convert an AlternateProtocol vector to a string to cache to the key value + // store. Note that in order to determine the lifetime of entries, this + // function will serialize ma= as absolute time from the epoch rather than + // relative time. + // This function also does not do standards-required normalization. Entries requiring + // normalization will simply not be read from cache. + static std::string protocolsToStringForCache(const std::vector& protocols, + TimeSource& time_source); + // Parse an alternate protocols string into structured data, or absl::nullopt + // if it is empty or invalid. + // If from_cache is true, it is assumed the string was serialized using + // protocolsToStringForCache and the the ma fields will be parsed as absolute times + // rather than relative time. + static absl::optional> + protocolsFromString(absl::string_view protocols, TimeSource& time_source, + bool from_cache = false); + // AlternateProtocolsCache - void setAlternatives(const Origin& origin, - const std::vector& protocols) override; + void setAlternatives(const Origin& origin, std::vector& protocols) override; OptRef> findAlternatives(const Origin& origin) override; size_t size() const override; private: + void setAlternativesImpl(const Origin& origin, std::vector& protocols); // Time source used to check expiration of entries. TimeSource& time_source_; - // Map from hostname to list of alternate protocols. - // TODO(RyanTheOptimist): Add a limit to the size of this map and evict based on usage. - std::map> protocols_; + struct OriginHash { + size_t operator()(const Origin& origin) const { + // Multiply the hashes by the magic number 37 to spread the bits around. + size_t hash = std::hash()(origin.scheme_) + + 37 * (std::hash()(origin.hostname_) + + 37 * std::hash()(origin.port_)); + return hash; + } + }; + + // Map from origin to list of alternate protocols. 
+ quiche::QuicheLinkedHashMap, OriginHash> protocols_; + + // The key value store, if flushing to persistent storage. + std::unique_ptr key_value_store_; + + const size_t max_entries_; }; } // namespace Http diff --git a/source/common/http/alternate_protocols_cache_manager_impl.cc b/source/common/http/alternate_protocols_cache_manager_impl.cc index fc04a6d084f42..b5ea969e00f80 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.cc +++ b/source/common/http/alternate_protocols_cache_manager_impl.cc @@ -1,5 +1,10 @@ #include "source/common/http/alternate_protocols_cache_manager_impl.h" +#include "envoy/common/key_value_store.h" +#include "envoy/config/common/key_value/v3/config.pb.h" +#include "envoy/config/common/key_value/v3/config.pb.validate.h" + +#include "source/common/config/utility.h" #include "source/common/http/alternate_protocols_cache_impl.h" #include "source/common/protobuf/protobuf.h" @@ -11,13 +16,20 @@ namespace Http { SINGLETON_MANAGER_REGISTRATION(alternate_protocols_cache_manager); AlternateProtocolsCacheManagerImpl::AlternateProtocolsCacheManagerImpl( - TimeSource& time_source, ThreadLocal::SlotAllocator& tls) - : time_source_(time_source), slot_(tls) { + AlternateProtocolsData& data, ThreadLocal::SlotAllocator& tls) + : data_(data), slot_(tls) { slot_.set([](Event::Dispatcher& /*dispatcher*/) { return std::make_shared(); }); } AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( - const envoy::config::core::v3::AlternateProtocolsCacheOptions& options) { + const envoy::config::core::v3::AlternateProtocolsCacheOptions& options, + Event::Dispatcher& dispatcher) { + if (options.has_key_value_store_config() && data_.concurrency_ != 1) { + throw EnvoyException( + fmt::format("options has key value store but Envoy has concurrency = {} : {}", + data_.concurrency_, options.DebugString())); + } + const auto& existing_cache = (*slot_).caches_.find(options.name()); if (existing_cache != (*slot_).caches_.end()) { if 
(!Protobuf::util::MessageDifferencer::Equivalent(options, existing_cache->second.options_)) { @@ -26,12 +38,21 @@ AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( " first '{}' second '{}'", options.name(), existing_cache->second.options_.DebugString(), options.DebugString())); } - return existing_cache->second.cache_; } - AlternateProtocolsCacheSharedPtr new_cache = - std::make_shared(time_source_); + std::unique_ptr store; + if (options.has_key_value_store_config()) { + envoy::config::common::key_value::v3::KeyValueStoreConfig kv_config; + MessageUtil::anyConvertAndValidate(options.key_value_store_config().typed_config(), kv_config, + data_.validation_visitor_); + auto& factory = Config::Utility::getAndCheckFactory(kv_config.config()); + store = + factory.createStore(kv_config, data_.validation_visitor_, dispatcher, data_.file_system_); + } + + AlternateProtocolsCacheSharedPtr new_cache = std::make_shared( + dispatcher.timeSource(), std::move(store), options.max_entries().value()); (*slot_).caches_.emplace(options.name(), CacheWithOptions{options, new_cache}); return new_cache; } @@ -39,7 +60,7 @@ AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( AlternateProtocolsCacheManagerSharedPtr AlternateProtocolsCacheManagerFactoryImpl::get() { return singleton_manager_.getTyped( SINGLETON_MANAGER_REGISTERED_NAME(alternate_protocols_cache_manager), - [this] { return std::make_shared(time_source_, tls_); }); + [this] { return std::make_shared(data_, tls_); }); } } // namespace Http diff --git a/source/common/http/alternate_protocols_cache_manager_impl.h b/source/common/http/alternate_protocols_cache_manager_impl.h index 227569ac19a60..966a8ec35c09d 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.h +++ b/source/common/http/alternate_protocols_cache_manager_impl.h @@ -2,6 +2,7 @@ #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/http/alternate_protocols_cache.h" +#include 
"envoy/server/factory_context.h" #include "envoy/singleton/instance.h" #include "envoy/singleton/manager.h" #include "envoy/thread_local/thread_local.h" @@ -11,14 +12,26 @@ namespace Envoy { namespace Http { +struct AlternateProtocolsData { + AlternateProtocolsData(Server::Configuration::FactoryContextBase& context) + : dispatcher_(context.mainThreadDispatcher()), + validation_visitor_(context.messageValidationVisitor()), + file_system_(context.api().fileSystem()), concurrency_(context.options().concurrency()) {} + Event::Dispatcher& dispatcher_; + ProtobufMessage::ValidationVisitor& validation_visitor_; + Filesystem::Instance& file_system_; + uint32_t concurrency_; +}; + class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager, public Singleton::Instance { public: - AlternateProtocolsCacheManagerImpl(TimeSource& time_source, ThreadLocal::SlotAllocator& tls); + AlternateProtocolsCacheManagerImpl(AlternateProtocolsData& data, ThreadLocal::SlotAllocator& tls); // AlternateProtocolsCacheManager AlternateProtocolsCacheSharedPtr - getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& options) override; + getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& options, + Event::Dispatcher& dispatcher) override; private: // Contains a cache and the options associated with it. @@ -37,7 +50,7 @@ class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager absl::flat_hash_map caches_; }; - TimeSource& time_source_; + AlternateProtocolsData& data_; // Thread local state for the cache. 
ThreadLocal::TypedSlot slot_; @@ -46,16 +59,16 @@ class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager class AlternateProtocolsCacheManagerFactoryImpl : public AlternateProtocolsCacheManagerFactory { public: AlternateProtocolsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, - TimeSource& time_source, - ThreadLocal::SlotAllocator& tls) - : singleton_manager_(singleton_manager), time_source_(time_source), tls_(tls) {} + ThreadLocal::SlotAllocator& tls, + AlternateProtocolsData data) + : singleton_manager_(singleton_manager), tls_(tls), data_(data) {} AlternateProtocolsCacheManagerSharedPtr get() override; private: Singleton::Manager& singleton_manager_; - TimeSource& time_source_; ThreadLocal::SlotAllocator& tls_; + AlternateProtocolsData data_; }; } // namespace Http diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index f913103750301..2c1da999f255b 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -42,7 +42,7 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, config_(http_context.asyncClientStatPrefix(), local_info, stats_store, cm, runtime, random, std::move(shadow_writer), true, false, false, false, false, {}, dispatcher.timeSource(), http_context, router_context), - dispatcher_(dispatcher) {} + dispatcher_(dispatcher), singleton_manager_(cm.clusterManagerFactory().singletonManager()) {} AsyncClientImpl::~AsyncClientImpl() { while (!active_streams_.empty()) { @@ -81,8 +81,8 @@ AsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCal router_(parent.config_), stream_info_(Protocol::Http11, parent.dispatcher().timeSource(), nullptr), tracing_config_(Tracing::EgressConfig::get()), - route_(std::make_shared(parent_.cluster_->name(), options.timeout, - options.hash_policy, options.retry_policy)), + route_(std::make_shared(parent_, options.timeout, options.hash_policy, + 
options.retry_policy)), send_xff_(options.send_xff) { stream_info_.dynamicMetadata().MergeFrom(options.metadata); diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 0d0273c696b14..608813cc01722 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -40,6 +40,7 @@ #include "source/common/router/router.h" #include "source/common/stream_info/stream_info_impl.h" #include "source/common/tracing/http_tracer_impl.h" +#include "source/common/upstream/retry_factory.h" namespace Envoy { namespace Http { @@ -67,6 +68,7 @@ class AsyncClientImpl final : public AsyncClient { Router::FilterConfig config_; Event::Dispatcher& dispatcher_; std::list> active_streams_; + Singleton::Manager& singleton_manager_; friend class AsyncStreamImpl; friend class AsyncRequestImpl; @@ -124,45 +126,6 @@ class AsyncStreamImpl : public AsyncClient::Stream, rate_limit_policy_entry_; }; - struct NullRetryPolicy : public Router::RetryPolicy { - // Router::RetryPolicy - std::chrono::milliseconds perTryTimeout() const override { - return std::chrono::milliseconds(0); - } - std::vector retryHostPredicates() const override { - return {}; - } - Upstream::RetryPrioritySharedPtr retryPriority() const override { return {}; } - - uint32_t hostSelectionMaxAttempts() const override { return 1; } - uint32_t numRetries() const override { return 1; } - uint32_t retryOn() const override { return 0; } - const std::vector& retriableStatusCodes() const override { - return retriable_status_codes_; - } - const std::vector& retriableHeaders() const override { - return retriable_headers_; - } - const std::vector& retriableRequestHeaders() const override { - return retriable_request_headers_; - } - absl::optional baseInterval() const override { - return absl::nullopt; - } - absl::optional maxInterval() const override { return absl::nullopt; } - const std::vector& resetHeaders() const override { - return reset_headers_; - } - 
std::chrono::milliseconds resetMaxInterval() const override { - return std::chrono::milliseconds(300000); - } - - const std::vector retriable_status_codes_{}; - const std::vector retriable_headers_{}; - const std::vector retriable_request_headers_{}; - const std::vector reset_headers_{}; - }; - struct NullConfig : public Router::Config { Router::RouteConstSharedPtr route(const Http::RequestHeaderMap&, const StreamInfo::StreamInfo&, uint64_t) const override { @@ -208,20 +171,21 @@ class AsyncStreamImpl : public AsyncClient::Stream, struct RouteEntryImpl : public Router::RouteEntry { RouteEntryImpl( - const std::string& cluster_name, const absl::optional& timeout, + AsyncClientImpl& parent, const absl::optional& timeout, const Protobuf::RepeatedPtrField& hash_policy, const absl::optional& retry_policy) - : cluster_name_(cluster_name), timeout_(timeout) { + : cluster_name_(parent.cluster_->name()), timeout_(timeout) { if (!hash_policy.empty()) { hash_policy_ = std::make_unique(hash_policy); } if (retry_policy.has_value()) { // ProtobufMessage::getStrictValidationVisitor() ? how often do we do this? 
+ Upstream::RetryExtensionFactoryContextImpl factory_context(parent.singleton_manager_); retry_policy_ = std::make_unique( - retry_policy.value(), ProtobufMessage::getNullValidationVisitor()); + retry_policy.value(), ProtobufMessage::getNullValidationVisitor(), factory_context); } else { - retry_policy_ = std::make_unique(); + retry_policy_ = std::make_unique(); } } @@ -327,12 +291,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, }; struct RouteImpl : public Router::Route { - RouteImpl(const std::string& cluster_name, - const absl::optional& timeout, + RouteImpl(AsyncClientImpl& parent, const absl::optional& timeout, const Protobuf::RepeatedPtrField& hash_policy, const absl::optional& retry_policy) - : route_entry_(cluster_name, timeout, hash_policy, retry_policy), typed_metadata_({}) {} + : route_entry_(parent, timeout, hash_policy, retry_policy), typed_metadata_({}) {} // Router::Route const Router::DirectResponseEntry* directResponseEntry() const override { return nullptr; } diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 29aa601384a44..56b40f3c84e71 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -110,13 +110,12 @@ void CodecClient::onEvent(Network::ConnectionEvent event) { if (connected_) { reason = StreamResetReason::ConnectionTermination; if (protocol_error_) { - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.return_502_for_upstream_protocol_errors")) { - reason = StreamResetReason::ProtocolError; - connection_->streamInfo().setResponseFlag( - StreamInfo::ResponseFlag::UpstreamProtocolError); - } + reason = StreamResetReason::ProtocolError; + connection_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError); } + } else { + ENVOY_CONN_LOG(warn, "Connection is closed by {} during connecting.", *connection_, + (event == Network::ConnectionEvent::RemoteClose ? 
"peer" : "self")); } while (!active_requests_.empty()) { // Fake resetting all active streams so that reset() callbacks get invoked. diff --git a/source/common/http/conn_pool_grid.cc b/source/common/http/conn_pool_grid.cc index bb70fad9978c9..7ed48ce6c54bc 100644 --- a/source/common/http/conn_pool_grid.cc +++ b/source/common/http/conn_pool_grid.cc @@ -23,7 +23,6 @@ ConnectivityGrid::WrapperCallbacks::WrapperCallbacks(ConnectivityGrid& grid, grid_.dispatcher_.createTimer([this]() -> void { tryAnotherConnection(); })), current_(pool_it) {} -// TODO(#15649) add trace logging. ConnectivityGrid::WrapperCallbacks::ConnectionAttemptCallbacks::ConnectionAttemptCallbacks( WrapperCallbacks& parent, PoolIterator it) : parent_(parent), pool_it_(it), cancellable_(nullptr) {} @@ -54,7 +53,6 @@ void ConnectivityGrid::WrapperCallbacks::ConnectionAttemptCallbacks::onPoolFailu void ConnectivityGrid::WrapperCallbacks::onConnectionAttemptFailed( ConnectionAttemptCallbacks* attempt, ConnectionPool::PoolFailureReason reason, absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) { - ASSERT(host == grid_.host_); ENVOY_LOG(trace, "{} pool failed to create connection to host '{}'.", describePool(attempt->pool()), host->hostname()); if (grid_.isPoolHttp3(attempt->pool())) { @@ -107,7 +105,6 @@ void ConnectivityGrid::WrapperCallbacks::onConnectionAttemptReady( ConnectionAttemptCallbacks* attempt, RequestEncoder& encoder, Upstream::HostDescriptionConstSharedPtr host, const StreamInfo::StreamInfo& info, absl::optional protocol) { - ASSERT(host == grid_.host_); ENVOY_LOG(trace, "{} pool successfully connected to host '{}'.", describePool(attempt->pool()), host->hostname()); if (!grid_.isPoolHttp3(attempt->pool())) { @@ -204,6 +201,7 @@ ConnectivityGrid::ConnectivityGrid( // HTTP/3. // TODO(#15649) support v6/v4, WiFi/cellular. 
ASSERT(connectivity_options.protocols_.size() == 3); + ASSERT(alternate_protocols); } ConnectivityGrid::~ConnectivityGrid() { @@ -364,11 +362,6 @@ bool ConnectivityGrid::shouldAttemptHttp3() { ENVOY_LOG(trace, "HTTP/3 is broken to host '{}', skipping.", host_->hostname()); return false; } - if (!alternate_protocols_) { - ENVOY_LOG(trace, "No alternate protocols cache. Attempting HTTP/3 to host '{}'.", - host_->hostname()); - return true; - } if (host_->address()->type() != Network::Address::Type::Ip) { ENVOY_LOG(error, "Address is not an IP address"); ASSERT(false); diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 5f42ef6ee642e..633fa6862d3de 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -687,6 +687,7 @@ class FilterManager : public ScopeTrackedObject, } // Http::FilterChainFactoryCallbacks + Event::Dispatcher& dispatcher() override { return dispatcher_; } void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { addStreamDecoderFilterWorker(filter, nullptr, false); filters_.push_back(filter.get()); diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 06badafc13e66..211aeed6aa1f0 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -339,6 +339,18 @@ Http::Status HeaderUtility::checkRequiredRequestHeaders(const Http::RequestHeade return absl::InvalidArgumentError( absl::StrCat("missing required header: ", Envoy::Http::Headers::get().Host.get())); } + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.validate_connect")) { + if (headers.Path() && !headers.Protocol()) { + // Path and Protocol header should only be present for CONNECT for upgrade style CONNECT. 
+ return absl::InvalidArgumentError( + absl::StrCat("missing required header: ", Envoy::Http::Headers::get().Protocol.get())); + } + if (!headers.Path() && headers.Protocol()) { + // Path and Protocol header should only be present for CONNECT for upgrade style CONNECT. + return absl::InvalidArgumentError( + absl::StrCat("missing required header: ", Envoy::Http::Headers::get().Path.get())); + } + } } else { if (!headers.Path()) { // :path header must be present for non-CONNECT requests. diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index de88ae236c440..77bc700656972 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -33,7 +33,7 @@ namespace Http1 { namespace { // Changes or additions to details should be reflected in -// docs/root/configuration/http/http_conn_man/response_code_details_details.rst +// docs/root/configuration/http/http_conn_man/response_code_details.rst struct Http1ResponseCodeDetailValues { const absl::string_view TooManyHeaders = "http1.too_many_headers"; const absl::string_view HeadersTooLarge = "http1.headers_too_large"; diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 2ccc1d32f9f46..da7092dc7bf6a 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -34,7 +34,7 @@ namespace Http { namespace Http2 { // Changes or additions to details should be reflected in -// docs/root/configuration/http/http_conn_man/response_code_details_details.rst +// docs/root/configuration/http/http_conn_man/response_code_details.rst class Http2ResponseCodeDetailValues { public: // Invalid HTTP header field was received and stream is going to be @@ -1728,14 +1728,9 @@ ClientConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_contr } StreamResetReason ClientConnectionImpl::getMessagingErrorResetReason() const { - StreamResetReason reason = StreamResetReason::LocalReset; 
- if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.return_502_for_upstream_protocol_errors")) { - reason = StreamResetReason::ProtocolError; - connection_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError); - } + connection_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError); - return reason; + return StreamResetReason::ProtocolError; } ServerConnectionImpl::ServerConnectionImpl( diff --git a/source/common/http/http3/conn_pool.cc b/source/common/http/http3/conn_pool.cc index 49ed2e1a7b3c4..9bba3fa0221c7 100644 --- a/source/common/http/http3/conn_pool.cc +++ b/source/common/http/http3/conn_pool.cc @@ -65,10 +65,15 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ host, priority, dispatcher, options, transport_socket_options, random_generator, state, [&quic_stat_names, &scope](HttpConnPoolImplBase* pool) -> ::Envoy::ConnectionPool::ActiveClientPtr { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::pool), debug, + "Creating Http/3 client"); // If there's no ssl context, the secrets are not loaded. Fast-fail by returning null. auto factory = &pool->host()->transportSocketFactory(); ASSERT(dynamic_cast(factory) != nullptr); if (static_cast(factory)->sslCtx() == nullptr) { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::pool), warn, + "Failed to create Http/3 client. Transport socket " + "factory is not configured correctly."); return nullptr; } Http3ConnPoolImpl* h3_pool = reinterpret_cast(pool); @@ -82,7 +87,13 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ data.connection_ = Quic::createQuicNetworkConnection(h3_pool->quicInfo(), pool->dispatcher(), host_address, source_address, quic_stat_names, scope); - return std::make_unique(*pool, data); + // Store a handle to connection as it will be moved during client construction. 
+ Network::Connection& connection = *data.connection_; + auto client = std::make_unique(*pool, data); + if (connection.state() == Network::Connection::State::Closed) { + return nullptr; + } + return client; }, [](Upstream::Host::CreateConnectionData& data, HttpConnPoolImplBase* pool) { CodecClientPtr codec{new CodecClientProd(CodecType::HTTP3, std::move(data.connection_), diff --git a/source/common/http/match_wrapper/config.cc b/source/common/http/match_wrapper/config.cc index 30c70adb2fac5..2292ca4e2d6ff 100644 --- a/source/common/http/match_wrapper/config.cc +++ b/source/common/http/match_wrapper/config.cc @@ -52,6 +52,7 @@ struct DelegatingFactoryCallbacks : public Envoy::Http::FilterChainFactoryCallba Matcher::MatchTreeSharedPtr match_tree) : delegated_callbacks_(delegated_callbacks), match_tree_(std::move(match_tree)) {} + Event::Dispatcher& dispatcher() override { return delegated_callbacks_.dispatcher(); } void addStreamDecoderFilter(Envoy::Http::StreamDecoderFilterSharedPtr filter) override { delegated_callbacks_.addStreamDecoderFilter(std::move(filter), match_tree_); } diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 9a803b9d8e906..6f2a55da1056a 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -463,6 +463,18 @@ std::string Utility::stripQueryString(const HeaderString& path) { query_offset != path_str.npos ? query_offset : path_str.size()); } +std::string Utility::replaceQueryString(const HeaderString& path, + const Utility::QueryParams& params) { + std::string new_path{Http::Utility::stripQueryString(path)}; + + if (!params.empty()) { + const auto new_query_string = Http::Utility::queryParamsToString(params); + absl::StrAppend(&new_path, new_query_string); + } + + return new_path; +} + std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { // TODO(wbpcode): Modify the headers parameter type to 'RequestHeaderMap'. 
return parseCookie(headers, key, Http::Headers::get().Cookie); @@ -1066,5 +1078,45 @@ Utility::AuthorityAttributes Utility::parseAuthority(absl::string_view host) { return {is_ip_address, host_to_resolve, port}; } +envoy::config::route::v3::RetryPolicy +Utility::convertCoreToRouteRetryPolicy(const envoy::config::core::v3::RetryPolicy& retry_policy, + const std::string& retry_on) { + envoy::config::route::v3::RetryPolicy route_retry_policy; + constexpr uint64_t default_base_interval_ms = 1000; + constexpr uint64_t default_max_interval_ms = 10 * default_base_interval_ms; + + uint64_t base_interval_ms = default_base_interval_ms; + uint64_t max_interval_ms = default_max_interval_ms; + + if (retry_policy.has_retry_back_off()) { + const auto& core_back_off = retry_policy.retry_back_off(); + + base_interval_ms = PROTOBUF_GET_MS_REQUIRED(core_back_off, base_interval); + max_interval_ms = + PROTOBUF_GET_MS_OR_DEFAULT(core_back_off, max_interval, base_interval_ms * 10); + + if (max_interval_ms < base_interval_ms) { + throw EnvoyException("max_interval must be greater than or equal to the base_interval"); + } + } + + route_retry_policy.mutable_num_retries()->set_value( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(retry_policy, num_retries, 1)); + + auto* route_mutable_back_off = route_retry_policy.mutable_retry_back_off(); + + route_mutable_back_off->mutable_base_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(base_interval_ms)); + route_mutable_back_off->mutable_max_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(max_interval_ms)); + + // set all the other fields with appropriate values. 
+ route_retry_policy.set_retry_on(retry_on); + route_retry_policy.mutable_per_try_timeout()->CopyFrom( + route_retry_policy.retry_back_off().max_interval()); + + return route_retry_policy; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/utility.h b/source/common/http/utility.h index d2ba67613c35d..a13922b7aee91 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -6,6 +6,7 @@ #include "envoy/config/core/v3/http_uri.pb.h" #include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/http/filter.h" @@ -247,6 +248,20 @@ absl::string_view findQueryStringStart(const HeaderString& path); */ std::string stripQueryString(const HeaderString& path); +/** + * Replace the query string portion of a given path with a new one. + * + * e.g. replaceQueryString("/foo?key=1", {key:2}) -> "/foo?key=2" + * replaceQueryString("/bar", {hello:there}) -> "/bar?hello=there" + * + * @param path the original path that may or may not contain an existing query string + * @param params the new params whose string representation should be formatted onto + * the `path` above + * @return std::string the new path whose query string has been replaced by `params` and whose path + * portion from `path` remains unchanged. + */ +std::string replaceQueryString(const HeaderString& path, const QueryParams& params); + /** * Parse a particular value out of a cookie * @param headers supplies the headers to get the cookie from. @@ -588,6 +603,16 @@ struct AuthorityAttributes { * @return hostname parse result. that includes whether host is IP Address, hostname and port-name */ AuthorityAttributes parseAuthority(absl::string_view host); + +/** + * It returns RetryPolicy defined in core api to route api. + * @param retry_policy core retry policy + * @param retry_on this specifies when retry should be invoked. 
+ * @return route retry policy + */ +envoy::config::route::v3::RetryPolicy +convertCoreToRouteRetryPolicy(const envoy::config::core::v3::RetryPolicy& retry_policy, + const std::string& retry_on); } // namespace Utility } // namespace Http } // namespace Envoy diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 6d85e13184c8a..114142e7040a8 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -343,6 +343,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", + "//source/common/common:scalar_to_byte_vector_lib", "//source/common/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/network/addr_family_aware_socket_option_impl.h b/source/common/network/addr_family_aware_socket_option_impl.h index 749788a4d9fa4..ff7ada58a947a 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.h +++ b/source/common/network/addr_family_aware_socket_option_impl.h @@ -18,15 +18,26 @@ class AddrFamilyAwareSocketOptionImpl : public Socket::Option, AddrFamilyAwareSocketOptionImpl(envoy::config::core::v3::SocketOption::SocketState in_state, SocketOptionName ipv4_optname, SocketOptionName ipv6_optname, int value) - : ipv4_option_(std::make_unique(in_state, ipv4_optname, value)), - ipv6_option_(std::make_unique(in_state, ipv6_optname, value)) {} + : AddrFamilyAwareSocketOptionImpl(in_state, ipv4_optname, value, ipv6_optname, value) {} + AddrFamilyAwareSocketOptionImpl(envoy::config::core::v3::SocketOption::SocketState in_state, + SocketOptionName ipv4_optname, int ipv4_value, + SocketOptionName ipv6_optname, int ipv6_value) + : ipv4_option_(std::make_unique(in_state, ipv4_optname, ipv4_value)), + ipv6_option_(std::make_unique(in_state, ipv6_optname, ipv6_value)) {} + AddrFamilyAwareSocketOptionImpl(envoy::config::core::v3::SocketOption::SocketState in_state, + SocketOptionName ipv4_optname, 
absl::string_view ipv4_value, + SocketOptionName ipv6_optname, absl::string_view ipv6_value) + : ipv4_option_(std::make_unique(in_state, ipv4_optname, ipv4_value)), + ipv6_option_(std::make_unique(in_state, ipv6_optname, ipv6_value)) {} // Socket::Option bool setOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; - // The common socket options don't require a hash key. - void hashKey(std::vector&) const override {} - + void hashKey(std::vector& hash_key) const override { + // Add both sub-options to the hash. + ipv4_option_->hashKey(hash_key); + ipv6_option_->hashKey(hash_key); + } absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index e612505d83369..5954724186bce 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -110,7 +110,6 @@ addressFromSockAddrOrDie(const sockaddr_storage& ss, socklen_t ss_len, os_fd_t f // address and the socket is actually v6 only, the returned address will be // regarded as a v6 address from dual stack socket. However, this address is not going to be // used to create socket. Wrong knowledge of dual stack support won't hurt. - ASSERT(Thread::MainThread::isWorkerThread()); StatusOr address = Address::addressFromSockAddr(ss, ss_len, v6only); if (!address.ok()) { diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc index 520bdada5eede..a5b39ce1d1d54 100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/common/network/apple_dns_impl.cc @@ -73,10 +73,10 @@ AppleDnsResolverImpl::startResolution(const std::string& dns_name, } ENVOY_LOG(trace, "Performing DNS resolution via Apple APIs"); - auto pending_resolution = - std::make_unique(*this, callback, dispatcher_, dns_name); + auto pending_resolution = std::make_unique(*this, callback, dispatcher_, + dns_name, dns_lookup_family); - DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); + DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(); if (error != kDNSServiceErr_NoError) { ENVOY_LOG(warn, "DNS resolver error ({}) in dnsServiceGetAddrInfo for {}", error, dns_name); chargeGetAddrInfoErrorStats(error); @@ -136,9 +136,10 @@ void AppleDnsResolverImpl::chargeGetAddrInfoErrorStats(DNSServiceErrorType error AppleDnsResolverImpl::PendingResolution::PendingResolution(AppleDnsResolverImpl& parent, ResolveCb callback, Event::Dispatcher& dispatcher, - const std::string& 
dns_name) + const std::string& dns_name, + DnsLookupFamily dns_lookup_family) : parent_(parent), callback_(callback), dispatcher_(dispatcher), dns_name_(dns_name), - pending_cb_({ResolutionStatus::Success, {}}) {} + pending_cb_({ResolutionStatus::Success, {}, {}}), dns_lookup_family_(dns_lookup_family) {} AppleDnsResolverImpl::PendingResolution::~PendingResolution() { ENVOY_LOG(debug, "Destroying PendingResolution for {}", dns_name_); @@ -185,10 +186,32 @@ void AppleDnsResolverImpl::PendingResolution::onEventCallback(uint32_t events) { } } +std::list& AppleDnsResolverImpl::PendingResolution::finalAddressList() { + switch (dns_lookup_family_) { + case DnsLookupFamily::V4Only: + return pending_cb_.v4_responses_; + case DnsLookupFamily::V6Only: + return pending_cb_.v6_responses_; + case DnsLookupFamily::Auto: + // Per API docs only give v4 if v6 is not available. + if (pending_cb_.v6_responses_.empty()) { + return pending_cb_.v4_responses_; + } + return pending_cb_.v6_responses_; + case DnsLookupFamily::V4Preferred: + // Per API docs only give v6 if v4 is not available. 
+ if (pending_cb_.v4_responses_.empty()) { + return pending_cb_.v6_responses_; + } + return pending_cb_.v4_responses_; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + void AppleDnsResolverImpl::PendingResolution::finishResolve() { ENVOY_LOG_EVENT(debug, "apple_dns_resolution_complete", "dns resolution for {} completed with status {}", dns_name_, pending_cb_.status_); - callback_(pending_cb_.status_, std::move(pending_cb_.responses_)); + callback_(pending_cb_.status_, std::move(finalAddressList())); if (owned_) { ENVOY_LOG(debug, "Resolution for {} completed (async)", dns_name_); @@ -199,10 +222,9 @@ void AppleDnsResolverImpl::PendingResolution::finishResolve() { } } -DNSServiceErrorType -AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo(DnsLookupFamily dns_lookup_family) { +DNSServiceErrorType AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo() { DNSServiceProtocol protocol; - switch (dns_lookup_family) { + switch (dns_lookup_family_) { case DnsLookupFamily::V4Only: protocol = kDNSServiceProtocol_IPv4; break; @@ -210,7 +232,20 @@ AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo(DnsLookupFamily d protocol = kDNSServiceProtocol_IPv6; break; case DnsLookupFamily::Auto: - protocol = kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6; + case DnsLookupFamily::V4Preferred: + /* We want to make sure we don't get any address that is not routable. Passing 0 + * to apple's `DNSServiceGetAddrInfo` will make a best attempt to filter out IPv6 + * or IPv4 addresses depending on what's routable, per Apple's documentation: + * + * If neither flag is set, the system will apply an intelligent heuristic, which + * is (currently) that it will attempt to look up both, except: + * If "hostname" is a wide-area unicast DNS hostname (i.e. not a ".local." name) but + * this host has no routable IPv6 address, then the call will not try to look up IPv6 + * addresses for "hostname", since any addresses it found would be unlikely to be of + * any use anyway. 
Similarly, if this host has no routable IPv4 address, the call will + * not try to look up IPv4 addresses for "hostname". + */ + protocol = 0; break; } @@ -255,7 +290,8 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( parent_.chargeGetAddrInfoErrorStats(error_code); pending_cb_.status_ = ResolutionStatus::Failure; - pending_cb_.responses_.clear(); + pending_cb_.v4_responses_.clear(); + pending_cb_.v6_responses_.clear(); finishResolve(); // Note: Nothing can follow this call to flushPendingQueries due to deletion of this @@ -271,7 +307,12 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( auto dns_response = buildDnsResponse(address, ttl); ENVOY_LOG(debug, "Address to add address={}, ttl={}", dns_response.address_->ip()->addressAsString(), ttl); - pending_cb_.responses_.push_back(dns_response); + if (dns_response.address_->ip()->ipv4()) { + pending_cb_.v4_responses_.push_back(dns_response); + } else { + ASSERT(dns_response.address_->ip()->ipv6()); + pending_cb_.v6_responses_.push_back(dns_response); + } } if (!(flags & kDNSServiceFlagsMoreComing)) { diff --git a/source/common/network/apple_dns_impl.h b/source/common/network/apple_dns_impl.h index 17328b484b3d8..3eeaba854c165 100644 --- a/source/common/network/apple_dns_impl.h +++ b/source/common/network/apple_dns_impl.h @@ -84,7 +84,8 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable& finalAddressList(); + // Small wrapping struct to accumulate addresses from firings of the // onDNSServiceGetAddrInfoReply callback. 
struct FinalResponse { ResolutionStatus status_; - std::list responses_; + std::list v4_responses_; + std::list v6_responses_; }; AppleDnsResolverImpl& parent_; @@ -124,6 +128,7 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable& cidrs) { + ip_list_.reserve(cidrs.size()); for (const envoy::config::core::v3::CidrRange& entry : cidrs) { CidrRange list_entry = CidrRange::create(entry); if (list_entry.isValid()) { - ip_list_.push_back(list_entry); + ip_list_.push_back(std::move(list_entry)); } else { throw EnvoyException( fmt::format("invalid ip/mask combo '{}/{}' (format is /<# mask bits>)", diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h index 791f3de7b5d67..e8ee65b4b3d4e 100644 --- a/source/common/network/cidr_range.h +++ b/source/common/network/cidr_range.h @@ -25,10 +25,8 @@ class CidrRange { */ CidrRange(); - /** - * Copies an existing CidrRange. - */ - CidrRange(const CidrRange& other); + CidrRange(const CidrRange& other) = default; + CidrRange(CidrRange&& other) = default; /** * Overwrites this with other. 
@@ -129,7 +127,6 @@ class IpList { IpList() = default; bool contains(const Instance& address) const; - bool empty() const { return ip_list_.empty(); } private: std::vector ip_list_; diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index b5d3472ad8f57..edbd5adbe34a8 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -827,6 +827,7 @@ void ServerConnectionImpl::onTransportSocketConnectTimeout() { stream_info_.setConnectionTerminationDetails(kTransportSocketConnectTimeoutTerminationDetails); closeConnectionImmediately(); transport_socket_timeout_stat_->inc(); + failure_reason_ = "connect timeout"; } ClientConnectionImpl::ClientConnectionImpl( diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 0f093edb405bb..7e1bf8f1f7e03 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -209,7 +209,14 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i if (!completed_ && fallback_if_failed_) { fallback_if_failed_ = false; - getAddrInfo(AF_INET); + + if (dns_lookup_family_ == DnsLookupFamily::Auto) { + getAddrInfo(AF_INET); + } else { + ASSERT(dns_lookup_family_ == DnsLookupFamily::V4Preferred); + getAddrInfo(AF_INET6); + } + // Note: Nothing can follow this call to getAddrInfo due to deletion of this // object upon synchronous resolution. 
return; @@ -273,13 +280,15 @@ ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, initializeChannel(&options.options_, options.optmask_); } - auto pending_resolution = - std::make_unique(*this, callback, dispatcher_, channel_, dns_name); - if (dns_lookup_family == DnsLookupFamily::Auto) { + auto pending_resolution = std::make_unique( + *this, callback, dispatcher_, channel_, dns_name, dns_lookup_family); + if (dns_lookup_family == DnsLookupFamily::Auto || + dns_lookup_family == DnsLookupFamily::V4Preferred) { pending_resolution->fallback_if_failed_ = true; } - if (dns_lookup_family == DnsLookupFamily::V4Only) { + if (dns_lookup_family == DnsLookupFamily::V4Only || + dns_lookup_family == DnsLookupFamily::V4Preferred) { pending_resolution->getAddrInfo(AF_INET); } else { pending_resolution->getAddrInfo(AF_INET6); diff --git a/source/common/network/dns_impl.h b/source/common/network/dns_impl.h index abcea92a4f885..059d82073f58f 100644 --- a/source/common/network/dns_impl.h +++ b/source/common/network/dns_impl.h @@ -40,9 +40,10 @@ class DnsResolverImpl : public DnsResolver, protected Logger::LoggableisOpen()) { + io_handle_->close(); + } + } + bool isOpen() const override { return io_handle_ != nullptr && io_handle_->isOpen(); } }; /** @@ -79,6 +86,16 @@ template class NetworkListenSocket : public ListenSocketImpl { Socket::Type socketType() const override { return T::type; } + SocketPtr duplicate() override { + if (io_handle_ == nullptr) { + // This is a listen socket that does not bind to port. Pass nullptr socket options. + return std::make_unique>(connection_info_provider_->localAddress(), + /*options=*/nullptr, /*bind_to_port*/ false); + } else { + return ListenSocketImpl::duplicate(); + } + } + // These four overrides are introduced to perform check. A null io handle is possible only if the // the owner socket is a listen socket that does not bind to port. 
IoHandle& ioHandle() override { @@ -97,8 +114,9 @@ template class NetworkListenSocket : public ListenSocketImpl { } } bool isOpen() const override { - ASSERT(io_handle_ != nullptr); - return io_handle_->isOpen(); + return io_handle_ == nullptr ? false // Consider listen socket as closed if it does not bind to + // port. No fd will leak. + : io_handle_->isOpen(); } protected: diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index 929979c8fa128..ba9ff7362dc6d 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -5,6 +5,7 @@ #include "source/common/api/os_sys_calls_impl.h" #include "source/common/common/assert.h" +#include "source/common/common/scalar_to_byte_vector.h" #include "source/common/common/utility.h" #include "source/common/network/address_impl.h" @@ -32,6 +33,14 @@ bool SocketOptionImpl::setOption(Socket& socket, return true; } +void SocketOptionImpl::hashKey(std::vector& hash_key) const { + if (optname_.hasValue()) { + pushScalarToByteVector(optname_.level(), hash_key); + pushScalarToByteVector(optname_.option(), hash_key); + hash_key.insert(hash_key.end(), value_.begin(), value_.end()); + } +} + absl::optional SocketOptionImpl::getOptionDetails(const Socket&, envoy::config::core::v3::SocketOption::SocketState state) const { diff --git a/source/common/network/socket_option_impl.h b/source/common/network/socket_option_impl.h index 4c47dde2c08a7..fd42517c7bd90 100644 --- a/source/common/network/socket_option_impl.h +++ b/source/common/network/socket_option_impl.h @@ -134,10 +134,7 @@ class SocketOptionImpl : public Socket::Option, Logger::Loggable&) const override {} - + void hashKey(std::vector& hash_key) const override; absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 7cd7b19c00be4..f8b6bd2ff9c44 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -239,35 +239,36 @@ void Utility::throwWithMalformedIp(absl::string_view ip_address) { // need to be updated in the future. Discussion can be found at Github issue #939. Address::InstanceConstSharedPtr Utility::getLocalAddress(const Address::IpVersion version) { Address::InstanceConstSharedPtr ret; -#ifdef SUPPORTS_GETIFADDRS - struct ifaddrs* ifaddr; - struct ifaddrs* ifa; + if (Api::OsSysCallsSingleton::get().supportsGetifaddrs()) { + struct ifaddrs* ifaddr; + struct ifaddrs* ifa; - const int rc = getifaddrs(&ifaddr); - RELEASE_ASSERT(!rc, ""); + const Api::SysCallIntResult rc = Api::OsSysCallsSingleton::get().getifaddrs(&ifaddr); + RELEASE_ASSERT(!rc.return_value_, fmt::format("getiffaddrs error: {}", rc.errno_)); - // man getifaddrs(3) - for (ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) { - if (ifa->ifa_addr == nullptr) { - continue; - } + // man getifaddrs(3) + for (ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) { + if (ifa->ifa_addr == nullptr) { + continue; + } - if ((ifa->ifa_addr->sa_family == AF_INET && version == Address::IpVersion::v4) || - (ifa->ifa_addr->sa_family == AF_INET6 && version == Address::IpVersion::v6)) { - const struct sockaddr_storage* addr = - reinterpret_cast(ifa->ifa_addr); - ret = Address::addressFromSockAddrOrThrow( - *addr, (version == Address::IpVersion::v4) ? 
sizeof(sockaddr_in) : sizeof(sockaddr_in6)); - if (!isLoopbackAddress(*ret)) { - break; + if ((ifa->ifa_addr->sa_family == AF_INET && version == Address::IpVersion::v4) || + (ifa->ifa_addr->sa_family == AF_INET6 && version == Address::IpVersion::v6)) { + const struct sockaddr_storage* addr = + reinterpret_cast(ifa->ifa_addr); + ret = Address::addressFromSockAddrOrThrow(*addr, (version == Address::IpVersion::v4) + ? sizeof(sockaddr_in) + : sizeof(sockaddr_in6)); + if (!isLoopbackAddress(*ret)) { + break; + } } } - } - if (ifaddr) { - freeifaddrs(ifaddr); + if (ifaddr) { + Api::OsSysCallsSingleton::get().freeifaddrs(ifaddr); + } } -#endif // If the local address is not found above, then return the loopback address by default. if (ret == nullptr) { diff --git a/source/common/network/win32_redirect_records_option_impl.h b/source/common/network/win32_redirect_records_option_impl.h index 3fbe5f6fc5855..efa88048d9705 100644 --- a/source/common/network/win32_redirect_records_option_impl.h +++ b/source/common/network/win32_redirect_records_option_impl.h @@ -20,8 +20,6 @@ class Win32RedirectRecordsOptionImpl : public Socket::Option, // Socket::Option bool setOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; - - // The common socket options don't require a hash key. void hashKey(std::vector&) const override; absl::optional
diff --git a/source/common/protobuf/message_validator_impl.cc b/source/common/protobuf/message_validator_impl.cc index 2d9d3dbb334ce..7908f688d3ac4 100644 --- a/source/common/protobuf/message_validator_impl.cc +++ b/source/common/protobuf/message_validator_impl.cc @@ -25,10 +25,27 @@ void onDeprecatedFieldCommon(absl::string_view description, bool soft_deprecatio } } // namespace -void WarningValidationVisitorImpl::setUnknownCounter(Stats::Counter& counter) { +void WipCounterBase::setWipCounter(Stats::Counter& wip_counter) { + ASSERT(wip_counter_ == nullptr); + wip_counter_ = &wip_counter; + wip_counter.add(prestats_wip_count_); +} + +void WipCounterBase::onWorkInProgressCommon(absl::string_view description) { + ENVOY_LOG_MISC(warn, "{}", description); + if (wip_counter_ != nullptr) { + wip_counter_->inc(); + } else { + prestats_wip_count_++; + } +} + +void WarningValidationVisitorImpl::setCounters(Stats::Counter& unknown_counter, + Stats::Counter& wip_counter) { + setWipCounter(wip_counter); ASSERT(unknown_counter_ == nullptr); - unknown_counter_ = &counter; - counter.add(prestats_unknown_count_); + unknown_counter_ = &unknown_counter; + unknown_counter.add(prestats_unknown_count_); } void WarningValidationVisitorImpl::onUnknownField(absl::string_view description) { @@ -53,6 +70,10 @@ void WarningValidationVisitorImpl::onDeprecatedField(absl::string_view descripti onDeprecatedFieldCommon(description, soft_deprecation); } +void WarningValidationVisitorImpl::onWorkInProgress(absl::string_view description) { + onWorkInProgressCommon(description); +} + void StrictValidationVisitorImpl::onUnknownField(absl::string_view description) { throw UnknownProtoFieldException( absl::StrCat("Protobuf message (", description, ") has unknown fields")); @@ -63,6 +84,10 @@ void StrictValidationVisitorImpl::onDeprecatedField(absl::string_view descriptio onDeprecatedFieldCommon(description, soft_deprecation); } +void StrictValidationVisitorImpl::onWorkInProgress(absl::string_view 
description) { + onWorkInProgressCommon(description); +} + ValidationVisitor& getNullValidationVisitor() { MUTABLE_CONSTRUCT_ON_FIRST_USE(NullValidationVisitorImpl); } diff --git a/source/common/protobuf/message_validator_impl.h b/source/common/protobuf/message_validator_impl.h index 865fc7e8ee559..5ddcdf5e59cdf 100644 --- a/source/common/protobuf/message_validator_impl.h +++ b/source/common/protobuf/message_validator_impl.h @@ -16,24 +16,34 @@ class NullValidationVisitorImpl : public ValidationVisitor { // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view) override {} void onDeprecatedField(absl::string_view, bool) override {} - - // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return true; } + void onWorkInProgress(absl::string_view) override {} }; ValidationVisitor& getNullValidationVisitor(); +// Base class for both warning and strict validators. +class WipCounterBase { +protected: + void setWipCounter(Stats::Counter& wip_counter); + void onWorkInProgressCommon(absl::string_view description); + +private: + Stats::Counter* wip_counter_{}; + uint64_t prestats_wip_count_{}; +}; + class WarningValidationVisitorImpl : public ValidationVisitor, + public WipCounterBase, public Logger::Loggable { public: - void setUnknownCounter(Stats::Counter& counter); + void setCounters(Stats::Counter& unknown_counter, Stats::Counter& wip_counter); // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; - - // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return false; } + void onWorkInProgress(absl::string_view description) override; private: // Track hashes of descriptions we've seen, to avoid log spam. 
A hash is used here to avoid @@ -45,16 +55,21 @@ class WarningValidationVisitorImpl : public ValidationVisitor, uint64_t prestats_unknown_count_{}; }; -class StrictValidationVisitorImpl : public ValidationVisitor { +class StrictValidationVisitorImpl : public ValidationVisitor, public WipCounterBase { public: - // Envoy::ProtobufMessage::ValidationVisitor - void onUnknownField(absl::string_view description) override; + void setCounters(Stats::Counter& wip_counter) { setWipCounter(wip_counter); } // Envoy::ProtobufMessage::ValidationVisitor + void onUnknownField(absl::string_view description) override; bool skipValidation() override { return false; } void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; + void onWorkInProgress(absl::string_view description) override; }; +// TODO(mattklein123): There are various places where the default strict validator is being used. +// This does not increment the WIP stat because nothing calls setCounters() on the stock/static +// version. We should remove this as a public function as well as the stock/static version and +// make sure that all code is either using the server validation context or the null validator. ValidationVisitor& getStrictValidationVisitor(); class ValidationContextImpl : public ValidationContext { @@ -77,23 +92,24 @@ class ProdValidationContextImpl : public ValidationContextImpl { public: ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields, bool ignore_unknown_dynamic_fields) - : ValidationContextImpl(allow_unknown_static_fields ? static_warning_validation_visitor_ - : getStrictValidationVisitor(), - allow_unknown_dynamic_fields - ? (ignore_unknown_dynamic_fields - ? 
ProtobufMessage::getNullValidationVisitor() - : dynamic_warning_validation_visitor_) - : ProtobufMessage::getStrictValidationVisitor()) {} - - ProtobufMessage::WarningValidationVisitorImpl& staticWarningValidationVisitor() { - return static_warning_validation_visitor_; - } - - ProtobufMessage::WarningValidationVisitorImpl& dynamicWarningValidationVisitor() { - return dynamic_warning_validation_visitor_; + : ValidationContextImpl( + allow_unknown_static_fields + ? static_cast(static_warning_validation_visitor_) + : strict_validation_visitor_, + allow_unknown_dynamic_fields + ? (ignore_unknown_dynamic_fields ? ProtobufMessage::getNullValidationVisitor() + : dynamic_warning_validation_visitor_) + : strict_validation_visitor_) {} + + void setCounters(Stats::Counter& static_unknown_counter, Stats::Counter& dynamic_unknown_counter, + Stats::Counter& wip_counter) { + strict_validation_visitor_.setCounters(wip_counter); + static_warning_validation_visitor_.setCounters(static_unknown_counter, wip_counter); + dynamic_warning_validation_visitor_.setCounters(dynamic_unknown_counter, wip_counter); } private: + StrictValidationVisitorImpl strict_validation_visitor_; ProtobufMessage::WarningValidationVisitorImpl static_warning_validation_visitor_; ProtobufMessage::WarningValidationVisitorImpl dynamic_warning_validation_visitor_; }; diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index c81d1b9245015..a3ee7462cfbe7 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -17,6 +17,8 @@ #include "absl/strings/match.h" #include "udpa/annotations/sensitive.pb.h" +#include "udpa/annotations/status.pb.h" +#include "xds/annotations/v3/status.pb.h" #include "yaml-cpp/yaml.h" using namespace std::chrono_literals; @@ -345,6 +347,11 @@ void checkForDeprecatedNonRepeatedEnumValue( message, validation_visitor); } +constexpr absl::string_view WipWarning = + "API features marked as work-in-progress are not considered stable, are 
not covered by the " + "threat model, are not supported by the security team, and are subject to breaking changes. Do " + "not use this feature without understanding each of the previous points."; + class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { public: UnexpectedFieldProtoVisitor(ProtobufMessage::ValidationVisitor& validation_visitor, @@ -367,6 +374,12 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { return nullptr; } + const auto& field_status = field.options().GetExtension(xds::annotations::v3::field_status); + if (field_status.work_in_progress()) { + validation_visitor_.onWorkInProgress(fmt::format( + "field '{}' is marked as work-in-progress. {}", field.full_name(), WipWarning)); + } + // If this field is deprecated, warn or throw an error. if (field.options().deprecated()) { if (absl::StartsWith(field.name(), "hidden_envoy_deprecated_")) { @@ -398,6 +411,24 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { } void onMessage(const Protobuf::Message& message, const void*) override { + if (message.GetDescriptor() + ->options() + .GetExtension(xds::annotations::v3::message_status) + .work_in_progress()) { + validation_visitor_.onWorkInProgress(fmt::format( + "message '{}' is marked as work-in-progress. {}", message.GetTypeName(), WipWarning)); + } + + const auto& udpa_file_options = + message.GetDescriptor()->file()->options().GetExtension(udpa::annotations::file_status); + const auto& xds_file_options = + message.GetDescriptor()->file()->options().GetExtension(xds::annotations::v3::file_status); + if (udpa_file_options.work_in_progress() || xds_file_options.work_in_progress()) { + validation_visitor_.onWorkInProgress( + fmt::format("message '{}' is contained in proto file '{}' marked as work-in-progress. {}", + message.GetTypeName(), message.GetDescriptor()->file()->name(), WipWarning)); + } + // Reject unknown fields. 
const auto& unknown_fields = message.GetReflection()->GetUnknownFields(message); if (!unknown_fields.empty()) { @@ -405,9 +436,6 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { for (int n = 0; n < unknown_fields.field_count(); ++n) { error_msg += absl::StrCat(n > 0 ? ", " : "", unknown_fields.field(n).number()); } - // We use the validation visitor but have hard coded behavior below for deprecated fields. - // TODO(htuch): Unify the deprecated and unknown visitor handling behind the validation - // visitor pattern. https://github.com/envoyproxy/envoy/issues/8092. if (!error_msg.empty()) { validation_visitor_.onUnknownField("type " + message.GetTypeName() + " with unknown field set {" + error_msg + "}"); @@ -651,7 +679,25 @@ void redact(Protobuf::Message* message, bool ancestor_is_sensitive) { if (field_descriptor->type() == Protobuf::FieldDescriptor::TYPE_MESSAGE) { // Recursive case: traverse message fields. - if (field_descriptor->is_repeated()) { + if (field_descriptor->is_map()) { + // Redact values of maps only. Redacting both leaves the map with multiple "[redacted]" + // keys. 
+ const int field_size = reflection->FieldSize(*message, field_descriptor); + for (int i = 0; i < field_size; ++i) { + Protobuf::Message* map_pair = + reflection->MutableRepeatedMessage(message, field_descriptor, i); + auto* value_field_desc = map_pair->GetDescriptor()->FindFieldByName("value"); + if (sensitive && (value_field_desc->type() == Protobuf::FieldDescriptor::TYPE_STRING || + value_field_desc->type() == Protobuf::FieldDescriptor::TYPE_BYTES)) { + map_pair->GetReflection()->SetString(map_pair, value_field_desc, "[redacted]"); + } else if (value_field_desc->type() == Protobuf::FieldDescriptor::TYPE_MESSAGE) { + redact(map_pair->GetReflection()->MutableMessage(map_pair, value_field_desc), + sensitive); + } else if (sensitive) { + map_pair->GetReflection()->ClearField(map_pair, value_field_desc); + } + } + } else if (field_descriptor->is_repeated()) { const int field_size = reflection->FieldSize(*message, field_descriptor); for (int i = 0; i < field_size; ++i) { redact(reflection->MutableRepeatedMessage(message, field_descriptor, i), sensitive); diff --git a/source/common/quic/BUILD b/source/common/quic/BUILD index fed592a8ddb11..f1fd86dd5c2b5 100644 --- a/source/common/quic/BUILD +++ b/source/common/quic/BUILD @@ -265,6 +265,7 @@ envoy_cc_library( ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", ":quic_stat_names_lib", + ":quic_transport_socket_factory_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/http:codes_lib", diff --git a/source/common/quic/active_quic_listener.cc b/source/common/quic/active_quic_listener.cc index 5bd7651758ada..304ff8bc7d34f 100644 --- a/source/common/quic/active_quic_listener.cc +++ b/source/common/quic/active_quic_listener.cc @@ -221,6 +221,24 @@ size_t ActiveQuicListener::numPacketsExpectedPerEventLoop() const { return quic_dispatcher_->NumSessions() * packets_to_read_to_connection_count_ratio_; } +void ActiveQuicListener::updateListenerConfig(Network::ListenerConfig& 
config) { + config_ = &config; + dynamic_cast(crypto_config_->proof_source()) + ->updateFilterChainManager(config.filterChainManager()); + quic_dispatcher_->updateListenerConfig(config); +} + +void ActiveQuicListener::onFilterChainDraining( + const std::list& draining_filter_chains) { + for (auto* filter_chain : draining_filter_chains) { + closeConnectionsWithFilterChain(filter_chain); + } +} + +void ActiveQuicListener::closeConnectionsWithFilterChain(const Network::FilterChain* filter_chain) { + quic_dispatcher_->closeConnectionsWithFilterChain(filter_chain); +} + ActiveQuicListenerFactory::ActiveQuicListenerFactory( const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency, QuicStatNames& quic_stat_names) diff --git a/source/common/quic/active_quic_listener.h b/source/common/quic/active_quic_listener.h index be72d17334770..eca1f28c0caa8 100644 --- a/source/common/quic/active_quic_listener.h +++ b/source/common/quic/active_quic_listener.h @@ -70,10 +70,15 @@ class ActiveQuicListener : public Envoy::Server::ActiveUdpListenerBase, void pauseListening() override; void resumeListening() override; void shutdownListener() override; + void updateListenerConfig(Network::ListenerConfig& config) override; + void onFilterChainDraining( + const std::list& draining_filter_chains) override; private: friend class ActiveQuicListenerPeer; + void closeConnectionsWithFilterChain(const Network::FilterChain* filter_chain); + uint8_t random_seed_[16]; std::unique_ptr crypto_config_; Event::Dispatcher& dispatcher_; diff --git a/source/common/quic/codec_impl.cc b/source/common/quic/codec_impl.cc index 0afcf65b99055..21100bb788825 100644 --- a/source/common/quic/codec_impl.cc +++ b/source/common/quic/codec_impl.cc @@ -71,6 +71,10 @@ QuicHttpClientConnectionImpl::QuicHttpClientConnectionImpl( session.set_max_inbound_header_list_size(max_request_headers_kb * 1024); } +void QuicHttpClientConnectionImpl::goAway() { + 
quic_client_session_.SendHttp3GoAway(quic::QUIC_PEER_GOING_AWAY, "client goaway"); +} + Http::RequestEncoder& QuicHttpClientConnectionImpl::newStream(Http::ResponseDecoder& response_decoder) { EnvoyQuicClientStream* stream = diff --git a/source/common/quic/codec_impl.h b/source/common/quic/codec_impl.h index 7faed7d94136f..b5136bb1b6313 100644 --- a/source/common/quic/codec_impl.h +++ b/source/common/quic/codec_impl.h @@ -70,8 +70,8 @@ class QuicHttpClientConnectionImpl : public QuicHttpConnectionImplBase, Http::RequestEncoder& newStream(Http::ResponseDecoder& response_decoder) override; // Http::Connection - void goAway() override { NOT_REACHED_GCOVR_EXCL_LINE; } - void shutdownNotice() override { NOT_REACHED_GCOVR_EXCL_LINE; } + void goAway() override; + void shutdownNotice() override {} void onUnderlyingConnectionAboveWriteBufferHighWatermark() override; void onUnderlyingConnectionBelowWriteBufferLowWatermark() override; diff --git a/source/common/quic/envoy_quic_client_session.cc b/source/common/quic/envoy_quic_client_session.cc index df9fd5f7a6755..8820ab1c00327 100644 --- a/source/common/quic/envoy_quic_client_session.cc +++ b/source/common/quic/envoy_quic_client_session.cc @@ -62,7 +62,7 @@ void EnvoyQuicClientSession::OnHttp3GoAway(uint64_t stream_id) { } void EnvoyQuicClientSession::MaybeSendRstStreamFrame(quic::QuicStreamId id, - quic::QuicRstStreamErrorCode error, + quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written) { QuicSpdyClientSession::MaybeSendRstStreamFrame(id, error, bytes_written); quic_stat_names_.chargeQuicResetStreamErrorStats(scope_, error, /*from_self*/ true, @@ -71,7 +71,7 @@ void EnvoyQuicClientSession::MaybeSendRstStreamFrame(quic::QuicStreamId id, void EnvoyQuicClientSession::OnRstStream(const quic::QuicRstStreamFrame& frame) { QuicSpdyClientSession::OnRstStream(frame); - quic_stat_names_.chargeQuicResetStreamErrorStats(scope_, frame.error_code, + quic_stat_names_.chargeQuicResetStreamErrorStats(scope_, 
frame.error(), /*from_self*/ false, /*is_upstream*/ true); } diff --git a/source/common/quic/envoy_quic_client_session.h b/source/common/quic/envoy_quic_client_session.h index def847ea0eb23..bdf7c96c49ddf 100644 --- a/source/common/quic/envoy_quic_client_session.h +++ b/source/common/quic/envoy_quic_client_session.h @@ -66,7 +66,7 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, void OnCanWrite() override; void OnHttp3GoAway(uint64_t stream_id) override; void OnTlsHandshakeComplete() override; - void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, + void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written) override; void OnRstStream(const quic::QuicRstStreamFrame& frame) override; // quic::QuicSpdyClientSessionBase @@ -99,7 +99,8 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, // These callbacks are owned by network filters and quic session should outlive // them. Http::ConnectionCallbacks* http_connection_callbacks_{nullptr}; - const absl::string_view host_name_; + // TODO(danzh) deprecate this field once server_id() is made const. 
+ const std::string host_name_; std::shared_ptr crypto_config_; EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory_; QuicStatNames& quic_stat_names_; diff --git a/source/common/quic/envoy_quic_client_stream.cc b/source/common/quic/envoy_quic_client_stream.cc index a4522b6b65485..1d413afc60974 100644 --- a/source/common/quic/envoy_quic_client_stream.cc +++ b/source/common/quic/envoy_quic_client_stream.cc @@ -275,12 +275,12 @@ void EnvoyQuicClientStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) runResetCallbacks(quicRstErrorToEnvoyRemoteResetReason(frame.error_code)); } -void EnvoyQuicClientStream::Reset(quic::QuicRstStreamErrorCode error) { - ENVOY_STREAM_LOG(debug, "sending reset code={}", *this, error); +void EnvoyQuicClientStream::ResetWithError(quic::QuicResetStreamError error) { + ENVOY_STREAM_LOG(debug, "sending reset code={}", *this, error.internal_code()); stats_.tx_reset_.inc(); // Upper layers expect calling resetStream() to immediately raise reset callbacks. 
- runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error)); - quic::QuicSpdyClientStream::Reset(error); + runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error.internal_code())); + quic::QuicSpdyClientStream::ResetWithError(error); } void EnvoyQuicClientStream::OnConnectionClosed(quic::QuicErrorCode error, diff --git a/source/common/quic/envoy_quic_client_stream.h b/source/common/quic/envoy_quic_client_stream.h index 89b35d8e51650..f763121944a1c 100644 --- a/source/common/quic/envoy_quic_client_stream.h +++ b/source/common/quic/envoy_quic_client_stream.h @@ -49,7 +49,7 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; - void Reset(quic::QuicRstStreamErrorCode error) override; + void ResetWithError(quic::QuicResetStreamError error) override; void OnClose() override; void OnCanWrite() override; // quic::Stream diff --git a/source/common/quic/envoy_quic_dispatcher.cc b/source/common/quic/envoy_quic_dispatcher.cc index 217e561e2b65d..37fd7472c9a6a 100644 --- a/source/common/quic/envoy_quic_dispatcher.cc +++ b/source/common/quic/envoy_quic_dispatcher.cc @@ -2,12 +2,14 @@ #include +#include +#include + #include "envoy/common/optref.h" #include "source/common/common/safe_memcpy.h" #include "source/common/http/utility.h" #include "source/common/quic/envoy_quic_server_connection.h" -#include "source/common/quic/envoy_quic_server_session.h" #include "source/common/quic/envoy_quic_utils.h" namespace Envoy { @@ -26,7 +28,7 @@ EnvoyQuicDispatcher::EnvoyQuicDispatcher( : quic::QuicDispatcher(&quic_config, crypto_config, version_manager, std::move(helper), std::make_unique(), std::move(alarm_factory), expected_server_connection_id_length), - connection_handler_(connection_handler), listener_config_(listener_config), + connection_handler_(connection_handler), listener_config_(&listener_config), listener_stats_(listener_stats), 
per_worker_stats_(per_worker_stats), dispatcher_(dispatcher), listen_socket_(listen_socket), quic_stat_names_(quic_stat_names), crypto_server_stream_factory_(crypto_server_stream_factory) { @@ -52,19 +54,21 @@ void EnvoyQuicDispatcher::OnConnectionClosed(quic::QuicConnectionId connection_i listener_stats_.downstream_cx_active_.dec(); per_worker_stats_.downstream_cx_active_.dec(); connection_handler_.decNumConnections(); - quic_stat_names_.chargeQuicConnectionCloseStats(listener_config_.listenerScope(), error, source, + quic_stat_names_.chargeQuicConnectionCloseStats(listener_config_->listenerScope(), error, source, /*is_upstream*/ false); } std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& self_address, - const quic::QuicSocketAddress& peer_address, absl::string_view alpn, + const quic::QuicSocketAddress& peer_address, absl::string_view /*alpn*/, const quic::ParsedQuicVersion& version, absl::string_view sni) { quic::QuicConfig quic_config = config(); + // TODO(danzh) use passed-in ALPN instead of hard-coded h3 after proof source interfaces takes in + // ALPN. 
Network::ConnectionSocketPtr connection_socket = createServerConnectionSocket( - listen_socket_.ioHandle(), self_address, peer_address, std::string(sni), alpn); + listen_socket_.ioHandle(), self_address, peer_address, std::string(sni), "h3"); const Network::FilterChain* filter_chain = - listener_config_.filterChainManager().findFilterChain(*connection_socket); + listener_config_->filterChainManager().findFilterChain(*connection_socket); auto quic_connection = std::make_unique( server_connection_id, self_address, peer_address, *helper(), *alarm_factory(), writer(), @@ -72,24 +76,21 @@ std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( auto quic_session = std::make_unique( quic_config, quic::ParsedQuicVersionVector{version}, std::move(quic_connection), this, session_helper(), crypto_config(), compressed_certs_cache(), dispatcher_, - listener_config_.perConnectionBufferLimitBytes(), quic_stat_names_, - listener_config_.listenerScope(), crypto_server_stream_factory_, - makeOptRefFromPtr(filter_chain == nullptr ? nullptr - : &filter_chain->transportSocketFactory())); + listener_config_->perConnectionBufferLimitBytes(), quic_stat_names_, + listener_config_->listenerScope(), crypto_server_stream_factory_); if (filter_chain != nullptr) { + // Setup filter chain before Initialize(). const bool has_filter_initialized = - listener_config_.filterChainFactory().createNetworkFilterChain( + listener_config_->filterChainFactory().createNetworkFilterChain( *quic_session, filter_chain->networkFilterFactories()); // QUIC listener must have HCM filter configured. Otherwise, stream creation later will fail. 
ASSERT(has_filter_initialized); + connections_by_filter_chain_[filter_chain].push_front( + std::reference_wrapper(*quic_session)); + quic_session->storeConnectionMapPosition(connections_by_filter_chain_, *filter_chain, + connections_by_filter_chain_[filter_chain].begin()); } quic_session->Initialize(); - // Filter chain can't be retrieved here as self address is unknown at this - // point. - // TODO(danzh): change QUIC interface to pass in self address as it is already - // known. In this way, filter chain can be retrieved at this point. But one - // thing to pay attention is that if the retrieval fails, connection needs to - // be closed, and it should be added to time wait list instead of session map. connection_handler_.incNumConnections(); listener_stats_.downstream_cx_active_.inc(); listener_stats_.downstream_cx_total_.inc(); @@ -107,5 +108,27 @@ quic::QuicConnectionId EnvoyQuicDispatcher::ReplaceLongServerConnectionId( return new_connection_id; } +void EnvoyQuicDispatcher::closeConnectionsWithFilterChain( + const Network::FilterChain* filter_chain) { + auto iter = connections_by_filter_chain_.find(filter_chain); + if (iter != connections_by_filter_chain_.end()) { + std::list>& connections = iter->second; + // Retain the number of connections in the list early because closing the connection will change + // the size. + const size_t num_connections = connections.size(); + for (size_t i = 0; i < num_connections; ++i) { + Network::Connection& connection = connections.front().get(); + // This will remove the connection from the list. And the last removal will remove connections + // from the map as well. 
+ connection.close(Network::ConnectionCloseType::NoFlush); + } + ASSERT(connections_by_filter_chain_.find(filter_chain) == connections_by_filter_chain_.end()); + } +} + +void EnvoyQuicDispatcher::updateListenerConfig(Network::ListenerConfig& new_listener_config) { + listener_config_ = &new_listener_config; +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_dispatcher.h b/source/common/quic/envoy_quic_dispatcher.h index b429e908d11be..77ed2ffcb361c 100644 --- a/source/common/quic/envoy_quic_dispatcher.h +++ b/source/common/quic/envoy_quic_dispatcher.h @@ -20,6 +20,7 @@ #include "source/server/connection_handler_impl.h" #include "source/server/active_listener_base.h" #include "source/common/quic/envoy_quic_crypto_stream_factory.h" +#include "source/common/quic/envoy_quic_server_session.h" #include "source/common/quic/quic_stat_names.h" namespace Envoy { @@ -54,6 +55,9 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { void OnConnectionClosed(quic::QuicConnectionId connection_id, quic::QuicErrorCode error, const std::string& error_details, quic::ConnectionCloseSource source) override; + void closeConnectionsWithFilterChain(const Network::FilterChain* filter_chain); + + void updateListenerConfig(Network::ListenerConfig& new_listener_config); protected: // quic::QuicDispatcher @@ -72,13 +76,14 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { private: Network::ConnectionHandler& connection_handler_; - Network::ListenerConfig& listener_config_; + Network::ListenerConfig* listener_config_{nullptr}; Server::ListenerStats& listener_stats_; Server::PerHandlerListenerStats& per_worker_stats_; Event::Dispatcher& dispatcher_; Network::Socket& listen_socket_; QuicStatNames& quic_stat_names_; EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory_; + FilterChainToConnectionMap connections_by_filter_chain_; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_packet_writer.cc 
b/source/common/quic/envoy_quic_packet_writer.cc index 6a3d358bae017..e2f53bd2df5bc 100644 --- a/source/common/quic/envoy_quic_packet_writer.cc +++ b/source/common/quic/envoy_quic_packet_writer.cc @@ -16,7 +16,7 @@ quic::WriteResult convertToQuicWriteResult(Api::IoCallUint64Result& result) { quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again ? quic::WRITE_STATUS_BLOCKED : quic::WRITE_STATUS_ERROR; - return {status, static_cast(result.err_->getErrorCode())}; + return {status, static_cast(result.err_->getSystemErrorCode())}; } } // namespace diff --git a/source/common/quic/envoy_quic_proof_source.cc b/source/common/quic/envoy_quic_proof_source.cc index 67d9e0ce3ce55..56f21560eb8f2 100644 --- a/source/common/quic/envoy_quic_proof_source.cc +++ b/source/common/quic/envoy_quic_proof_source.cc @@ -16,7 +16,10 @@ namespace Quic { quic::QuicReferenceCountedPointer EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, - const std::string& hostname) { + const std::string& hostname, bool* cert_matched_sni) { + // TODO(DavidSchinazi) parse the certificate to correctly fill in |cert_matched_sni|. 
+ *cert_matched_sni = false; + CertConfigWithFilterChain res = getTlsCertConfigAndFilterChain(server_address, client_address, hostname); absl::optional> cert_config_ref = @@ -100,13 +103,15 @@ EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddre Network::ConnectionSocketPtr connection_socket = createServerConnectionSocket( listen_socket_.ioHandle(), server_address, client_address, hostname, "h3"); const Network::FilterChain* filter_chain = - filter_chain_manager_.findFilterChain(*connection_socket); + filter_chain_manager_->findFilterChain(*connection_socket); if (filter_chain == nullptr) { listener_stats_.no_filter_chain_match_.inc(); ENVOY_LOG(warn, "No matching filter chain found for handshake."); return {absl::nullopt, absl::nullopt}; } + ENVOY_LOG(trace, "Got a matching cert chain {}", filter_chain->name()); + auto& transport_socket_factory = dynamic_cast(filter_chain->transportSocketFactory()); @@ -122,5 +127,10 @@ EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddre return {tls_cert_configs[0].get(), *filter_chain}; } +void EnvoyQuicProofSource::updateFilterChainManager( + Network::FilterChainManager& filter_chain_manager) { + filter_chain_manager_ = &filter_chain_manager; +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_proof_source.h b/source/common/quic/envoy_quic_proof_source.h index fcf388c609140..84668caf4d906 100644 --- a/source/common/quic/envoy_quic_proof_source.h +++ b/source/common/quic/envoy_quic_proof_source.h @@ -14,7 +14,7 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { EnvoyQuicProofSource(Network::Socket& listen_socket, Network::FilterChainManager& filter_chain_manager, Server::ListenerStats& listener_stats) - : listen_socket_(listen_socket), filter_chain_manager_(filter_chain_manager), + : listen_socket_(listen_socket), filter_chain_manager_(&filter_chain_manager), listener_stats_(listener_stats) {} ~EnvoyQuicProofSource() 
override = default; @@ -22,7 +22,10 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { // quic::ProofSource quic::QuicReferenceCountedPointer GetCertChain(const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, const std::string& hostname) override; + const quic::QuicSocketAddress& client_address, const std::string& hostname, + bool* cert_matched_sni) override; + + void updateFilterChainManager(Network::FilterChainManager& filter_chain_manager); protected: // quic::ProofSource @@ -43,7 +46,7 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { const std::string& hostname); Network::Socket& listen_socket_; - Network::FilterChainManager& filter_chain_manager_; + Network::FilterChainManager* filter_chain_manager_{nullptr}; Server::ListenerStats& listener_stats_; }; diff --git a/source/common/quic/envoy_quic_server_connection.cc b/source/common/quic/envoy_quic_server_connection.cc index 963032e041f00..6a4d691009361 100644 --- a/source/common/quic/envoy_quic_server_connection.cc +++ b/source/common/quic/envoy_quic_server_connection.cc @@ -38,7 +38,8 @@ bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& hea std::unique_ptr EnvoyQuicServerConnection::MakeSelfIssuedConnectionIdManager() { return std::make_unique( - quic::kMinNumOfActiveConnectionIds, connection_id(), clock(), alarm_factory(), this); + quic::kMinNumOfActiveConnectionIds, connection_id(), clock(), alarm_factory(), this, + context()); } quic::QuicConnectionId EnvoyQuicSelfIssuedConnectionIdManager::GenerateNewConnectionId( diff --git a/source/common/quic/envoy_quic_server_session.cc b/source/common/quic/envoy_quic_server_session.cc index 15d1d28745dd3..67d224deeeb01 100644 --- a/source/common/quic/envoy_quic_server_session.cc +++ b/source/common/quic/envoy_quic_server_session.cc @@ -1,5 +1,6 @@ #include "source/common/quic/envoy_quic_server_session.h" +#include #include #include "source/common/common/assert.h" @@ 
-15,15 +16,14 @@ EnvoyQuicServerSession::EnvoyQuicServerSession( quic::QuicCryptoServerStream::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, QuicStatNames& quic_stat_names, Stats::Scope& listener_scope, - EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, - OptRef transport_socket_factory) + EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory) : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper, crypto_config, compressed_certs_cache), QuicFilterManagerConnectionImpl(*connection, connection->connection_id(), dispatcher, send_buffer_limit), quic_connection_(std::move(connection)), quic_stat_names_(quic_stat_names), - listener_scope_(listener_scope), crypto_server_stream_factory_(crypto_server_stream_factory), - transport_socket_factory_(transport_socket_factory) {} + listener_scope_(listener_scope), crypto_server_stream_factory_(crypto_server_stream_factory) { +} EnvoyQuicServerSession::~EnvoyQuicServerSession() { ASSERT(!quic_connection_->connected()); @@ -39,7 +39,9 @@ EnvoyQuicServerSession::CreateQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache) { return crypto_server_stream_factory_.createEnvoyQuicCryptoServerStream( - crypto_config, compressed_certs_cache, this, stream_helper(), transport_socket_factory_, + crypto_config, compressed_certs_cache, this, stream_helper(), + makeOptRefFromPtr(position_.has_value() ? 
&position_->filter_chain_.transportSocketFactory() + : nullptr), dispatcher()); } @@ -89,6 +91,17 @@ void EnvoyQuicServerSession::OnConnectionClosed(const quic::QuicConnectionCloseF quic::ConnectionCloseSource source) { quic::QuicServerSessionBase::OnConnectionClosed(frame, source); onConnectionCloseEvent(frame, source, version()); + if (position_.has_value()) { + // Remove this connection from the map. + std::list>& connections = + position_->connection_map_[&position_->filter_chain_]; + connections.erase(position_->iterator_); + if (connections.empty()) { + // Remove the whole entry if this is the last connection using this filter chain. + position_->connection_map_.erase(&position_->filter_chain_); + } + position_.reset(); + } } void EnvoyQuicServerSession::Initialize() { @@ -120,7 +133,7 @@ void EnvoyQuicServerSession::OnTlsHandshakeComplete() { } void EnvoyQuicServerSession::MaybeSendRstStreamFrame(quic::QuicStreamId id, - quic::QuicRstStreamErrorCode error, + quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written) { QuicServerSessionBase::MaybeSendRstStreamFrame(id, error, bytes_written); quic_stat_names_.chargeQuicResetStreamErrorStats(listener_scope_, error, /*from_self*/ true, @@ -129,9 +142,15 @@ void EnvoyQuicServerSession::MaybeSendRstStreamFrame(quic::QuicStreamId id, void EnvoyQuicServerSession::OnRstStream(const quic::QuicRstStreamFrame& frame) { QuicServerSessionBase::OnRstStream(frame); - quic_stat_names_.chargeQuicResetStreamErrorStats(listener_scope_, frame.error_code, + quic_stat_names_.chargeQuicResetStreamErrorStats(listener_scope_, frame.error(), /*from_self*/ false, /*is_upstream*/ false); } +void EnvoyQuicServerSession::storeConnectionMapPosition(FilterChainToConnectionMap& connection_map, + const Network::FilterChain& filter_chain, + ConnectionMapIter position) { + position_.emplace(connection_map, filter_chain, position); +} + } // namespace Quic } // namespace Envoy diff --git 
a/source/common/quic/envoy_quic_server_session.h b/source/common/quic/envoy_quic_server_session.h index a3a804023aef9..e63ba55473a51 100644 --- a/source/common/quic/envoy_quic_server_session.h +++ b/source/common/quic/envoy_quic_server_session.h @@ -29,6 +29,25 @@ namespace Envoy { namespace Quic { +using FilterChainToConnectionMap = + absl::flat_hash_map>>; +using ConnectionMapIter = std::list>::iterator; + +// Used to track the matching filter chain and its position in the filter chain to connection map. +struct ConnectionMapPosition { + ConnectionMapPosition(FilterChainToConnectionMap& connection_map, + const Network::FilterChain& filter_chain, ConnectionMapIter iterator) + : connection_map_(connection_map), filter_chain_(filter_chain), iterator_(iterator) {} + + // Stores the map from filter chain of connections. + FilterChainToConnectionMap& connection_map_; + // The matching filter chain of a connection. + const Network::FilterChain& filter_chain_; + // The position of the connection in the map. + ConnectionMapIter iterator_; +}; + // Act as a Network::Connection to HCM and a FilterManager to FilterFactoryCb. 
// TODO(danzh) Lifetime of quic connection and filter manager connection can be // simplified by changing the inheritance to a member variable instantiated @@ -45,8 +64,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, QuicStatNames& quic_stat_names, Stats::Scope& listener_scope, - EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, - OptRef transport_socket_factory); + EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory); ~EnvoyQuicServerSession() override; @@ -67,7 +85,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, void Initialize() override; void OnCanWrite() override; void OnTlsHandshakeComplete() override; - void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, + void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written) override; void OnRstStream(const quic::QuicRstStreamFrame& frame) override; @@ -77,6 +95,10 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, headers_with_underscores_action_ = headers_with_underscores_action; } + void storeConnectionMapPosition(FilterChainToConnectionMap& connection_map, + const Network::FilterChain& filter_chain, + ConnectionMapIter position); + using quic::QuicSession::PerformActionOnActiveStreams; protected: @@ -113,7 +135,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, Stats::Scope& listener_scope_; EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory_; - OptRef transport_socket_factory_; + absl::optional position_; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_server_stream.cc b/source/common/quic/envoy_quic_server_stream.cc index f5249c30375c1..1e62a22030558 100644 --- a/source/common/quic/envoy_quic_server_stream.cc +++ 
b/source/common/quic/envoy_quic_server_stream.cc @@ -46,6 +46,8 @@ EnvoyQuicServerStream::EnvoyQuicServerStream( headers_with_underscores_action_(headers_with_underscores_action) { ASSERT(static_cast(GetReceiveWindow().value()) > 8 * 1024, "Send buffer limit should be larger than 8KB."); + // TODO(alyssawilk, danzh) if http3_options_.allow_extended_connect() is true, + // send the correct SETTINGS. } void EnvoyQuicServerStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) { @@ -167,7 +169,9 @@ void EnvoyQuicServerStream::OnInitialHeadersComplete(bool fin, size_t frame_len, onStreamError(close_connection_upon_invalid_header_, rst); return; } - if (Http::HeaderUtility::requestHeadersValid(*headers) != absl::nullopt) { + if (Http::HeaderUtility::requestHeadersValid(*headers) != absl::nullopt || + Http::HeaderUtility::checkRequiredRequestHeaders(*headers) != Http::okStatus() || + (headers->Protocol() && !http3_options_.allow_extended_connect())) { details_ = Http3ResponseCodeDetailValues::invalid_http_header; onStreamError(absl::nullopt); return; @@ -257,9 +261,9 @@ void EnvoyQuicServerStream::maybeDecodeTrailers() { } } -bool EnvoyQuicServerStream::OnStopSending(quic::QuicRstStreamErrorCode error) { +bool EnvoyQuicServerStream::OnStopSending(quic::QuicResetStreamError error) { // Only called in IETF Quic to close write side. - ENVOY_STREAM_LOG(debug, "received STOP_SENDING with reset code={}", *this, error); + ENVOY_STREAM_LOG(debug, "received STOP_SENDING with reset code={}", *this, error.internal_code()); stats_.rx_reset_.inc(); bool end_stream_encoded = local_end_stream_; // This call will close write. @@ -274,7 +278,7 @@ bool EnvoyQuicServerStream::OnStopSending(quic::QuicRstStreamErrorCode error) { if (!end_stream_encoded) { // If both directions are closed but end stream hasn't been encoded yet, notify reset callbacks. // Treat this as a remote reset, since the stream will be closed in both directions. 
- runResetCallbacks(quicRstErrorToEnvoyRemoteResetReason(error)); + runResetCallbacks(quicRstErrorToEnvoyRemoteResetReason(error.internal_code())); } return true; } @@ -293,14 +297,14 @@ void EnvoyQuicServerStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) } } -void EnvoyQuicServerStream::Reset(quic::QuicRstStreamErrorCode error) { - ENVOY_STREAM_LOG(debug, "sending reset code={}", *this, error); +void EnvoyQuicServerStream::ResetWithError(quic::QuicResetStreamError error) { + ENVOY_STREAM_LOG(debug, "sending reset code={}", *this, error.internal_code()); stats_.tx_reset_.inc(); if (!local_end_stream_) { // Upper layers expect calling resetStream() to immediately raise reset callbacks. - runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error)); + runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error.internal_code())); } - quic::QuicSpdyServerStreamBase::Reset(error); + quic::QuicSpdyServerStreamBase::ResetWithError(error); } void EnvoyQuicServerStream::OnConnectionClosed(quic::QuicErrorCode error, @@ -392,7 +396,7 @@ void EnvoyQuicServerStream::onStreamError(absl::optional should_close_conn !http3_options_.override_stream_error_on_invalid_http_message().value(); } if (close_connection_upon_invalid_header) { - stream_delegate()->OnStreamError(quic::QUIC_HTTP_FRAME_ERROR, "Invalid headers"); + stream_delegate()->OnStreamError(quic::QUIC_HTTP_FRAME_ERROR, std::string(details_)); } else { Reset(rst); } diff --git a/source/common/quic/envoy_quic_server_stream.h b/source/common/quic/envoy_quic_server_stream.h index 45aecf8843f35..8693cd197b2bf 100644 --- a/source/common/quic/envoy_quic_server_stream.h +++ b/source/common/quic/envoy_quic_server_stream.h @@ -48,9 +48,9 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, // quic::QuicSpdyStream void OnBodyAvailable() override; - bool OnStopSending(quic::QuicRstStreamErrorCode error) override; + bool OnStopSending(quic::QuicResetStreamError error) override; void OnStreamReset(const 
quic::QuicRstStreamFrame& frame) override; - void Reset(quic::QuicRstStreamErrorCode error) override; + void ResetWithError(quic::QuicResetStreamError error) override; void OnClose() override; void OnCanWrite() override; // quic::QuicSpdyServerStreamBase diff --git a/source/common/quic/platform/quic_logging_impl.h b/source/common/quic/platform/quic_logging_impl.h index f5dc65dd7c431..10fdb01e91e2c 100644 --- a/source/common/quic/platform/quic_logging_impl.h +++ b/source/common/quic/platform/quic_logging_impl.h @@ -128,6 +128,7 @@ #endif #define QUICHE_PREDICT_FALSE_IMPL(x) ABSL_PREDICT_FALSE(x) +#define QUICHE_PREDICT_TRUE_IMPL(x) ABSL_PREDICT_TRUE(x) namespace quic { diff --git a/source/common/quic/platform/quiche_flags_impl.cc b/source/common/quic/platform/quiche_flags_impl.cc index 6dcd07e3f0815..af607a83adc49 100644 --- a/source/common/quic/platform/quiche_flags_impl.cc +++ b/source/common/quic/platform/quiche_flags_impl.cc @@ -34,7 +34,6 @@ absl::flat_hash_map makeFlagMap() { #undef QUIC_FLAG // Disable IETF draft 29 implementation. Envoy only supports RFC-v1. FLAGS_quic_reloadable_flag_quic_disable_version_draft_29->setValue(true); - FLAGS_quic_reloadable_flag_quic_decline_server_push_stream->setValue(true); #define QUIC_PROTOCOL_FLAG(type, flag, ...) flags.emplace(FLAGS_##flag->name(), FLAGS_##flag); #include "quiche/quic/core/quic_protocol_flags_list.h" diff --git a/source/common/quic/quic_filter_manager_connection_impl.cc b/source/common/quic/quic_filter_manager_connection_impl.cc index a2049252c60db..b69a63f2681d1 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.cc +++ b/source/common/quic/quic_filter_manager_connection_impl.cc @@ -12,7 +12,9 @@ QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl( // Using this for purpose other than logging is not safe. Because QUIC connection id can be // 18 bytes, so there might be collision when it's hashed to 8 bytes. 
: Network::ConnectionImplBase(dispatcher, /*id=*/connection_id.Hash()), - network_connection_(&connection), filter_manager_(*this, *connection.connectionSocket()), + network_connection_(&connection), + filter_manager_( + std::make_unique(*this, *connection.connectionSocket())), stream_info_(dispatcher.timeSource(), connection.connectionSocket()->connectionInfoProviderSharedPtr()), write_buffer_watermark_simulation_( @@ -22,23 +24,23 @@ QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl( } void QuicFilterManagerConnectionImpl::addWriteFilter(Network::WriteFilterSharedPtr filter) { - filter_manager_.addWriteFilter(filter); + filter_manager_->addWriteFilter(filter); } void QuicFilterManagerConnectionImpl::addFilter(Network::FilterSharedPtr filter) { - filter_manager_.addFilter(filter); + filter_manager_->addFilter(filter); } void QuicFilterManagerConnectionImpl::addReadFilter(Network::ReadFilterSharedPtr filter) { - filter_manager_.addReadFilter(filter); + filter_manager_->addReadFilter(filter); } void QuicFilterManagerConnectionImpl::removeReadFilter(Network::ReadFilterSharedPtr filter) { - filter_manager_.removeReadFilter(filter); + filter_manager_->removeReadFilter(filter); } bool QuicFilterManagerConnectionImpl::initializeReadFilters() { - return filter_manager_.initializeReadFilters(); + return filter_manager_->initializeReadFilters(); } void QuicFilterManagerConnectionImpl::enableHalfClose(bool enabled) { @@ -171,6 +173,7 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( network_connection_ = nullptr; } + filter_manager_ = nullptr; if (!codec_stats_.has_value()) { // The connection was closed before it could be used. Stats are not recorded. 
return; diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index 6fabaa5ded3ce..7003860c80116 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -82,6 +82,10 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, void setConnectionStats(const Network::Connection::ConnectionStats& stats) override { // TODO(danzh): populate stats. Network::ConnectionImplBase::setConnectionStats(stats); + if (network_connection_ == nullptr) { + ENVOY_CONN_LOG(error, "Quic connection has been detached.", *this); + return; + } network_connection_->setConnectionStats(stats); } Ssl::ConnectionInfoConstSharedPtr ssl() const override; @@ -179,7 +183,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, // filters are added, ConnectionManagerImpl should always be the last one. // Its onRead() is only called once to trigger ReadFilter::onNewConnection() // and the rest incoming data bypasses these filters. 
- Network::FilterManagerImpl filter_manager_; + std::unique_ptr filter_manager_; StreamInfo::StreamInfoImpl stream_info_; std::string transport_failure_reason_; diff --git a/source/common/quic/quic_stat_names.cc b/source/common/quic/quic_stat_names.cc index d613c48391539..7a792c19ad293 100644 --- a/source/common/quic/quic_stat_names.cc +++ b/source/common/quic/quic_stat_names.cc @@ -45,15 +45,16 @@ void QuicStatNames::chargeQuicConnectionCloseStats(Stats::Scope& scope, } void QuicStatNames::chargeQuicResetStreamErrorStats(Stats::Scope& scope, - quic::QuicRstStreamErrorCode error_code, + quic::QuicResetStreamError error_code, bool from_self, bool is_upstream) { ASSERT(&symbol_table_ == &scope.symbolTable()); - if (error_code > quic::QUIC_STREAM_LAST_ERROR) { - error_code = quic::QUIC_STREAM_LAST_ERROR; + auto internal_code = error_code.internal_code(); + if (internal_code > quic::QUIC_STREAM_LAST_ERROR) { + internal_code = quic::QUIC_STREAM_LAST_ERROR; } - const Stats::StatName stream_error = resetStreamErrorStatName(error_code); + const Stats::StatName stream_error = resetStreamErrorStatName(internal_code); incCounter(scope, {http3_prefix_, (is_upstream ? upstream_ : downstream_), (from_self ? 
from_self_ : from_peer_), stream_error}); } diff --git a/source/common/quic/quic_stat_names.h b/source/common/quic/quic_stat_names.h index f9eba684a76b8..19abf6921f846 100644 --- a/source/common/quic/quic_stat_names.h +++ b/source/common/quic/quic_stat_names.h @@ -21,7 +21,7 @@ class QuicStatNames { void chargeQuicConnectionCloseStats(Stats::Scope& scope, quic::QuicErrorCode error_code, quic::ConnectionCloseSource source, bool is_upstream); - void chargeQuicResetStreamErrorStats(Stats::Scope& scope, quic::QuicRstStreamErrorCode error_code, + void chargeQuicResetStreamErrorStats(Stats::Scope& scope, quic::QuicResetStreamError error_code, bool from_self, bool is_upstream); private: diff --git a/source/common/router/BUILD b/source/common/router/BUILD index ffea1d8aa65e4..4e0bbe9145706 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -63,6 +63,7 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", "//source/common/tracing:http_tracer_lib", + "//source/common/upstream:retry_factory_lib", "//source/extensions/filters/http/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index c3dd32519f74e..d756c607966a3 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -37,6 +37,7 @@ #include "source/common/router/retry_state_impl.h" #include "source/common/runtime/runtime_features.h" #include "source/common/tracing/http_tracer_impl.h" +#include "source/common/upstream/retry_factory.h" #include "source/extensions/filters/http/common/utility.h" #include "absl/strings/match.h" @@ -87,7 +88,8 @@ HedgePolicyImpl::HedgePolicyImpl(const envoy::config::route::v3::HedgePolicy& he HedgePolicyImpl::HedgePolicyImpl() : initial_requests_(1), hedge_on_per_try_timeout_(false) {} RetryPolicyImpl::RetryPolicyImpl(const 
envoy::config::route::v3::RetryPolicy& retry_policy, - ProtobufMessage::ValidationVisitor& validation_visitor) + ProtobufMessage::ValidationVisitor& validation_visitor, + Upstream::RetryExtensionFactoryContext& factory_context) : retriable_headers_( Http::HeaderUtility::buildHeaderMatcherVector(retry_policy.retriable_headers())), retriable_request_headers_( @@ -95,6 +97,8 @@ RetryPolicyImpl::RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& re validation_visitor_(&validation_visitor) { per_try_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(retry_policy, per_try_timeout, 0)); + per_try_idle_timeout_ = + std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(retry_policy, per_try_idle_timeout, 0)); num_retries_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(retry_policy, num_retries, 1); retry_on_ = RetryStateImpl::parseRetryOn(retry_policy.retry_on()).first; retry_on_ |= RetryStateImpl::parseRetryGrpcOn(retry_policy.retry_on()).first; @@ -116,6 +120,16 @@ RetryPolicyImpl::RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& re retry_priority, validation_visitor, factory)); } + for (const auto& options_predicate : retry_policy.retry_options_predicates()) { + auto& factory = + Envoy::Config::Utility::getAndCheckFactory( + options_predicate); + retry_options_predicates_.emplace_back( + factory.createOptionsPredicate(*Envoy::Config::Utility::translateToFactoryConfig( + options_predicate, validation_visitor, factory), + factory_context)); + } + auto host_selection_attempts = retry_policy.host_selection_retry_max_attempts(); if (host_selection_attempts) { host_selection_attempts_ = host_selection_attempts; @@ -348,7 +362,8 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, prefix_rewrite_redirect_(route.redirect().prefix_rewrite()), strip_query_(route.redirect().strip_query()), hedge_policy_(buildHedgePolicy(vhost.hedgePolicy(), route.route())), - retry_policy_(buildRetryPolicy(vhost.retryPolicy(), route.route(), validator)), + 
retry_policy_( + buildRetryPolicy(vhost.retryPolicy(), route.route(), validator, factory_context)), internal_redirect_policy_( buildInternalRedirectPolicy(route.route(), validator, route.name())), rate_limit_policy_(route.route().rate_limits(), validator), @@ -374,7 +389,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, vhost_.globalRouteConfig().maxDirectResponseBodySizeBytes())), per_filter_configs_(route.typed_per_filter_config(), optional_http_filters, factory_context, validator), - route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()) { + route_name_(route.name()), time_source_(factory_context.mainThreadDispatcher().timeSource()) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); @@ -891,15 +906,18 @@ HedgePolicyImpl RouteEntryImplBase::buildHedgePolicy( RetryPolicyImpl RouteEntryImplBase::buildRetryPolicy( const absl::optional& vhost_retry_policy, const envoy::config::route::v3::RouteAction& route_config, - ProtobufMessage::ValidationVisitor& validation_visitor) const { + ProtobufMessage::ValidationVisitor& validation_visitor, + Server::Configuration::ServerFactoryContext& factory_context) const { + Upstream::RetryExtensionFactoryContextImpl retry_factory_context( + factory_context.singletonManager()); // Route specific policy wins, if available. if (route_config.has_retry_policy()) { - return RetryPolicyImpl(route_config.retry_policy(), validation_visitor); + return RetryPolicyImpl(route_config.retry_policy(), validation_visitor, retry_factory_context); } // If not, we fallback to the virtual host policy if there is one. if (vhost_retry_policy) { - return RetryPolicyImpl(vhost_retry_policy.value(), validation_visitor); + return RetryPolicyImpl(vhost_retry_policy.value(), validation_visitor, retry_factory_context); } // Otherwise, an empty policy will do. 
@@ -965,6 +983,23 @@ const RouteEntry* RouteEntryImplBase::routeEntry() const { } } +RouteConstSharedPtr +RouteEntryImplBase::pickClusterViaClusterHeader(const Http::LowerCaseString& cluster_header_name, + const Http::HeaderMap& headers) const { + const auto entry = headers.get(cluster_header_name); + std::string final_cluster_name; + if (!entry.empty()) { + // This is an implicitly untrusted header, so per the API documentation only + // the first value is used. + final_cluster_name = std::string(entry[0]->value().getStringView()); + } + + // NOTE: Though we return a shared_ptr here, the current ownership model + // assumes that the route table sticks around. See snapped_route_config_ in + // ConnectionManagerImpl::ActiveStream. + return std::make_shared(this, final_cluster_name); +} + RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& headers, uint64_t random_value) const { // Gets the route object chosen from the list of weighted clusters @@ -974,23 +1009,42 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& head return shared_from_this(); } else { ASSERT(!cluster_header_name_.get().empty()); - const auto entry = headers.get(cluster_header_name_); - std::string final_cluster_name; - if (!entry.empty()) { - // This is an implicitly untrusted header, so per the API documentation only the first - // value is used. - final_cluster_name = std::string(entry[0]->value().getStringView()); - } + return pickClusterViaClusterHeader(cluster_header_name_, headers); + } + } + return pickWeightedCluster(headers, random_value, true); +} + +RouteConstSharedPtr RouteEntryImplBase::pickWeightedCluster(const Http::HeaderMap& headers, + const uint64_t random_value, + const bool ignore_overflow) const { + const uint64_t selected_value = random_value % total_cluster_weight_; + uint64_t begin = 0; + uint64_t end = 0; + + // Find the right cluster to route to based on the interval in which + // the selected value falls. 
The intervals are determined as + // [0, cluster1_weight), [cluster1_weight, cluster1_weight+cluster2_weight),.. + for (const WeightedClusterEntrySharedPtr& cluster : weighted_clusters_) { + end = begin + cluster->clusterWeight(); + if (!ignore_overflow) { + // end > total_cluster_weight: This case can only occur with Runtimes, + // when the user specifies invalid weights such that + // sum(weights) > total_cluster_weight. + ASSERT(end <= total_cluster_weight_); + } - // NOTE: Though we return a shared_ptr here, the current ownership model assumes that - // the route table sticks around. See snapped_route_config_ in - // ConnectionManagerImpl::ActiveStream. - return std::make_shared(this, final_cluster_name); + if (selected_value >= begin && selected_value < end) { + if (!cluster->clusterHeaderName().get().empty() && + !headers.get(cluster->clusterHeaderName()).empty()) { + return pickClusterViaClusterHeader(cluster->clusterHeaderName(), headers); + } + return cluster; } + begin = end; } - return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_, random_value, - true); + NOT_REACHED_GCOVR_EXCL_LINE; } void RouteEntryImplBase::validateClusters( @@ -1011,9 +1065,17 @@ void RouteEntryImplBase::validateClusters( } } else if (!weighted_clusters_.empty()) { for (const WeightedClusterEntrySharedPtr& cluster : weighted_clusters_) { - if (!cluster_info_maps.hasCluster(cluster->clusterName())) { - throw EnvoyException( - fmt::format("route: unknown weighted cluster '{}'", cluster->clusterName())); + if (!cluster->clusterName().empty()) { + if (!cluster_info_maps.hasCluster(cluster->clusterName())) { + throw EnvoyException( + fmt::format("route: unknown weighted cluster '{}'", cluster->clusterName())); + } + } + // For weighted clusters with `cluster_header_name`, we only verify that this field is + // not empty because the cluster name is not set yet at config time (hence the validation + // here). 
+ else if (cluster->clusterHeaderName().get().empty()) { + throw EnvoyException("route: unknown weighted cluster with no cluster_header field"); } } } @@ -1269,7 +1331,6 @@ VirtualHostImpl::VirtualHostImpl( new PathRouteEntryImpl(*this, route, optional_http_filters, factory_context, validator)); break; } - case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex: case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex: { routes_.emplace_back( new RegexRouteEntryImpl(*this, route, optional_http_filters, factory_context, validator)); @@ -1361,9 +1422,9 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r validation_clusters = factory_context.clusterManager().clusters(); } for (const auto& virtual_host_config : route_config.virtual_hosts()) { - VirtualHostSharedPtr virtual_host( - new VirtualHostImpl(virtual_host_config, optional_http_filters, global_route_config, - factory_context, *vhost_scope_, validator, validation_clusters)); + VirtualHostSharedPtr virtual_host = std::make_shared( + virtual_host_config, optional_http_filters, global_route_config, factory_context, + *vhost_scope_, validator, validation_clusters); for (const std::string& domain_name : virtual_host_config.domains()) { const std::string domain = Http::LowerCaseString(domain_name).get(); bool duplicate_found = false; diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index f291fd45e7b74..659861f3b11c9 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -292,15 +292,21 @@ class RetryPolicyImpl : public RetryPolicy { public: RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& retry_policy, - ProtobufMessage::ValidationVisitor& validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + Upstream::RetryExtensionFactoryContext& factory_context); RetryPolicyImpl() = default; // Router::RetryPolicy std::chrono::milliseconds 
perTryTimeout() const override { return per_try_timeout_; } + std::chrono::milliseconds perTryIdleTimeout() const override { return per_try_idle_timeout_; } uint32_t numRetries() const override { return num_retries_; } uint32_t retryOn() const override { return retry_on_; } std::vector retryHostPredicates() const override; Upstream::RetryPrioritySharedPtr retryPriority() const override; + absl::Span + retryOptionsPredicates() const override { + return retry_options_predicates_; + } uint32_t hostSelectionMaxAttempts() const override { return host_selection_attempts_; } const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; @@ -320,6 +326,7 @@ class RetryPolicyImpl : public RetryPolicy { private: std::chrono::milliseconds per_try_timeout_{0}; + std::chrono::milliseconds per_try_idle_timeout_{0}; // We set the number of retries to 1 by default (i.e. when no route or vhost level retry policy is // set) so that when retries get enabled through the x-envoy-retry-on header we default to 1 // retry. 
@@ -342,6 +349,7 @@ class RetryPolicyImpl : public RetryPolicy { std::vector reset_headers_{}; std::chrono::milliseconds reset_max_interval_{300000}; ProtobufMessage::ValidationVisitor* validation_visitor_{}; + std::vector retry_options_predicates_; }; /** @@ -811,7 +819,7 @@ class RouteEntryImplBase : public RouteEntry, const std::string& filter_name, std::function cb) const override; - const Http::LowerCaseString& clusterHeaderName() { return cluster_header_name_; } + const Http::LowerCaseString& clusterHeaderName() const { return cluster_header_name_; } private: const std::string runtime_key_; @@ -847,13 +855,20 @@ class RouteEntryImplBase : public RouteEntry, RetryPolicyImpl buildRetryPolicy(const absl::optional& vhost_retry_policy, const envoy::config::route::v3::RouteAction& route_config, - ProtobufMessage::ValidationVisitor& validation_visitor) const; + ProtobufMessage::ValidationVisitor& validation_visitor, + Server::Configuration::ServerFactoryContext& factory_context) const; InternalRedirectPolicyImpl buildInternalRedirectPolicy(const envoy::config::route::v3::RouteAction& route_config, ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) const; + RouteConstSharedPtr pickClusterViaClusterHeader(const Http::LowerCaseString& cluster_header_name, + const Http::HeaderMap& headers) const; + + RouteConstSharedPtr pickWeightedCluster(const Http::HeaderMap& headers, uint64_t random_value, + bool ignore_overflow) const; + // Default timeout is 15s if nothing is specified in the route config. static const uint64_t DEFAULT_ROUTE_TIMEOUT_MS = 15000; diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 049e0f752fded..62f47d9221a11 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -309,9 +309,11 @@ void RdsRouteConfigProviderImpl::requestVirtualHostsUpdate( // execute the callback. 
still_alive shared_ptr will be deallocated when the current instance of // the RdsRouteConfigProviderImpl is deallocated; we rely on a weak_ptr to still_alive flag to // determine if the RdsRouteConfigProviderImpl instance is still valid. - factory_context_.dispatcher().post([this, maybe_still_alive = std::weak_ptr(still_alive_), - alias, &thread_local_dispatcher, - route_config_updated_cb]() -> void { + factory_context_.mainThreadDispatcher().post([this, + maybe_still_alive = + std::weak_ptr(still_alive_), + alias, &thread_local_dispatcher, + route_config_updated_cb]() -> void { if (maybe_still_alive.lock()) { subscription_->updateOnDemand(alias); config_update_callbacks_.push_back({alias, thread_local_dispatcher, route_config_updated_cb}); diff --git a/source/common/router/router.cc b/source/common/router/router.cc index d1115c8a40df0..054b6a6858ecd 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -160,6 +160,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req } } timeout.per_try_timeout_ = route.retryPolicy().perTryTimeout(); + timeout.per_try_idle_timeout_ = route.retryPolicy().perTryIdleTimeout(); uint64_t header_timeout; @@ -502,12 +503,33 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Fetch a connection pool for the upstream cluster. const auto& upstream_http_protocol_options = cluster_->upstreamHttpProtocolOptions(); - if (upstream_http_protocol_options.has_value()) { - const auto parsed_authority = Http::Utility::parseAuthority(headers.getHostValue()); - if (!parsed_authority.is_ip_address_ && upstream_http_protocol_options.value().auto_sni()) { + if (upstream_http_protocol_options.has_value() && + (upstream_http_protocol_options.value().auto_sni() || + upstream_http_protocol_options.value().auto_san_validation())) { + // Default the header to Host/Authority header. 
+ absl::string_view header_value = headers.getHostValue(); + + // Check whether `override_auto_sni_header` is specified. + const auto override_auto_sni_header = + upstream_http_protocol_options.value().override_auto_sni_header(); + if (!override_auto_sni_header.empty()) { + // Use the header value from `override_auto_sni_header` to set the SNI value. + const auto overridden_header_value = Http::HeaderUtility::getAllOfHeaderAsString( + headers, Http::LowerCaseString(override_auto_sni_header)); + if (overridden_header_value.result().has_value() && + !overridden_header_value.result().value().empty()) { + header_value = overridden_header_value.result().value(); + } + } + const auto parsed_authority = Http::Utility::parseAuthority(header_value); + bool should_set_sni = !parsed_authority.is_ip_address_; + // `host_` returns a string_view so doing this should be safe. + absl::string_view sni_value = parsed_authority.host_; + + if (should_set_sni && upstream_http_protocol_options.value().auto_sni()) { callbacks_->streamInfo().filterState()->setData( Network::UpstreamServerName::key(), - std::make_unique(parsed_authority.host_), + std::make_unique(sni_value), StreamInfo::FilterState::StateType::Mutable); } @@ -515,7 +537,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, callbacks_->streamInfo().filterState()->setData( Network::UpstreamSubjectAltNames::key(), std::make_unique( - std::vector{std::string(parsed_authority.host_)}), + std::vector{std::string(sni_value)}), StreamInfo::FilterState::StateType::Mutable); } } @@ -931,6 +953,7 @@ void Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) { retry_state_->shouldHedgeRetryPerTryTimeout([this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { + runRetryOptionsPredicates(upstream_request); pending_retries_++; // Don't increment upstream_host->stats().rq_error_ here, we'll do that @@ -948,13 +971,24 @@ void Filter::onSoftPerTryTimeout(UpstreamRequest& 
upstream_request) { } } +void Filter::onPerTryIdleTimeout(UpstreamRequest& upstream_request) { + onPerTryTimeoutCommon(upstream_request, cluster_->stats().upstream_rq_per_try_idle_timeout_, + StreamInfo::ResponseCodeDetails::get().UpstreamPerTryIdleTimeout); +} + void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { + onPerTryTimeoutCommon(upstream_request, cluster_->stats().upstream_rq_per_try_timeout_, + StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); +} + +void Filter::onPerTryTimeoutCommon(UpstreamRequest& upstream_request, Stats::Counter& error_counter, + const std::string& response_code_details) { if (hedging_params_.hedge_on_per_try_timeout_) { onSoftPerTryTimeout(upstream_request); return; } - cluster_->stats().upstream_rq_per_try_timeout_.inc(); + error_counter.inc(); if (upstream_request.upstreamHost()) { upstream_request.upstreamHost()->stats().rq_timeout_.inc(); } @@ -972,8 +1006,7 @@ void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { // Remove this upstream request from the list now that we're done with it. 
upstream_request.removeFromList(upstream_requests_); - onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout, - StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); + onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout, response_code_details); } void Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) { @@ -1071,6 +1104,7 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, const RetryStatus retry_status = retry_state_->shouldRetryReset(reset_reason, [this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { + runRetryOptionsPredicates(upstream_request); pending_retries_++; if (upstream_request.upstreamHost()) { @@ -1288,6 +1322,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt const RetryStatus retry_status = retry_state_->shouldRetryHeaders(*headers, [this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { + runRetryOptionsPredicates(upstream_request); pending_retries_++; upstream_request.upstreamHost()->stats().rq_error_.inc(); Http::CodeStats& code_stats = httpContext().codeStats(); @@ -1619,6 +1654,17 @@ bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& do return true; } +void Filter::runRetryOptionsPredicates(UpstreamRequest& retriable_request) { + for (const auto& options_predicate : route_entry_->retryPolicy().retryOptionsPredicates()) { + const Upstream::RetryOptionsPredicate::UpdateOptionsParameters parameters{ + retriable_request.streamInfo(), upstreamSocketOptions()}; + auto ret = options_predicate->updateOptions(parameters); + if (ret.new_upstream_socket_options_.has_value()) { + upstream_options_ = ret.new_upstream_socket_options_.value(); + } + } +} + void Filter::doRetry() { ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); diff --git a/source/common/router/router.h b/source/common/router/router.h index 9a1d057793a26..e9d0234aed5d0 100644 --- 
a/source/common/router/router.h +++ b/source/common/router/router.h @@ -51,6 +51,7 @@ class FilterUtility { struct TimeoutData { std::chrono::milliseconds global_timeout_{0}; std::chrono::milliseconds per_try_timeout_{0}; + std::chrono::milliseconds per_try_idle_timeout_{0}; }; struct HedgingParams { @@ -271,6 +272,7 @@ class RouterFilterInterface { UpstreamRequest& upstream_request) PURE; virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE; virtual void onPerTryTimeout(UpstreamRequest& upstream_request) PURE; + virtual void onPerTryIdleTimeout(UpstreamRequest& upstream_request) PURE; virtual void onStreamMaxDurationReached(UpstreamRequest& upstream_request) PURE; virtual Http::StreamDecoderFilterCallbacks* callbacks() PURE; @@ -445,6 +447,7 @@ class Filter : Logger::Loggable, UpstreamRequest& upstream_request) override; void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override; void onPerTryTimeout(UpstreamRequest& upstream_request) override; + void onPerTryIdleTimeout(UpstreamRequest& upstream_request) override; void onStreamMaxDurationReached(UpstreamRequest& upstream_request) override; Http::StreamDecoderFilterCallbacks* callbacks() override { return callbacks_; } Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; } @@ -469,8 +472,8 @@ class Filter : Logger::Loggable, private: friend class UpstreamRequest; - RetryStatePtr retry_state_; - + void onPerTryTimeoutCommon(UpstreamRequest& upstream_request, Stats::Counter& error_counter, + const std::string& response_code_details); Stats::StatName upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host); void chargeUpstreamCode(uint64_t response_status_code, const Http::ResponseHeaderMap& response_headers, @@ -521,6 +524,7 @@ class Filter : Logger::Loggable, void updateOutlierDetection(Upstream::Outlier::Result result, UpstreamRequest& upstream_request, absl::optional code); void doRetry(); + void 
runRetryOptionsPredicates(UpstreamRequest& retriable_request); // Called immediately after a non-5xx header is received from upstream, performs stats accounting // and handle difference between gRPC and non-gRPC requests. void handleNon5xxResponseHeaders(absl::optional grpc_status, @@ -528,6 +532,7 @@ class Filter : Logger::Loggable, uint64_t grpc_to_http_status); Http::Context& httpContext() { return config_.http_context_; } + RetryStatePtr retry_state_; FilterConfig& config_; Http::StreamDecoderFilterCallbacks* callbacks_{}; RouteConstSharedPtr route_; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index eac4557b0ceb8..133e91e313ddf 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -196,7 +196,7 @@ void ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::initRdsConfigPro rds_update_callback_handle_ = route_provider_->subscription().addUpdateCallback([this]() { // Subscribe to RDS update. - parent_.onRdsConfigUpdate(scope_name_, route_provider_->subscription()); + parent_.onRdsConfigUpdate(scope_name_, route_provider_->config()); }); parent_.stats_.active_scopes_.inc(); } @@ -209,21 +209,19 @@ void ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::maybeInitRdsConf // Create a init_manager to create a rds provider. // No transitive warming dependency here because only on demand update reach this point. 
- std::unique_ptr srds_init_mgr = - std::make_unique(fmt::format("SRDS on demand init manager.")); - std::unique_ptr srds_initialization_continuation = - std::make_unique([this, &srds_init_mgr] { - Init::WatcherImpl noop_watcher( - fmt::format("SRDS on demand ConfigUpdate watcher: {}", scope_name_), - []() { /*Do nothing.*/ }); - srds_init_mgr->initialize(noop_watcher); - }); + Init::ManagerImpl srds_init_mgr("SRDS on demand init manager."); + Cleanup srds_initialization_continuation([this, &srds_init_mgr] { + Init::WatcherImpl noop_watcher( + fmt::format("SRDS on demand ConfigUpdate watcher: {}", scope_name_), + []() { /*Do nothing.*/ }); + srds_init_mgr.initialize(noop_watcher); + }); // Create route provider. envoy::extensions::filters::network::http_connection_manager::v3::Rds rds; rds.mutable_config_source()->MergeFrom(parent_.rds_config_source_); rds.set_route_config_name( parent_.scoped_route_map_[scope_name_]->configProto().route_configuration_name()); - initRdsConfigProvider(rds, *srds_init_mgr); + initRdsConfigProvider(rds, srds_init_mgr); ENVOY_LOG(debug, fmt::format("Scope on demand update: {}", scope_name_)); // If RouteConfiguration hasn't been initialized, routeConfig() return a shared_ptr to // NullConfigImpl. The name of NullConfigImpl is an empty string. @@ -231,7 +229,7 @@ void ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::maybeInitRdsConf return; } // If RouteConfiguration has been initialized, apply update to all the threads. 
- parent_.onRdsConfigUpdate(scope_name_, route_provider_->subscription()); + parent_.onRdsConfigUpdate(scope_name_, route_provider_->config()); } bool ScopedRdsConfigSubscription::addOrUpdateScopes( @@ -393,16 +391,13 @@ void ScopedRdsConfigSubscription::onConfigUpdate( } void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_name, - RdsRouteConfigSubscription& rds_subscription) { + ConfigConstSharedPtr new_rds_config) { auto iter = scoped_route_map_.find(scope_name); ASSERT(iter != scoped_route_map_.end(), fmt::format("trying to update route config for non-existing scope {}", scope_name)); auto new_scoped_route_info = std::make_shared( envoy::config::route::v3::ScopedRouteConfiguration(iter->second->configProto()), - std::make_shared( - rds_subscription.routeConfigUpdate()->protobufConfiguration(), optional_http_filters_, - factory_context_, factory_context_.messageValidationContext().dynamicValidationVisitor(), - false)); + std::move(new_rds_config)); applyConfigUpdate([new_scoped_route_info](ConfigProvider::ConfigConstSharedPtr config) -> ConfigProvider::ConfigConstSharedPtr { auto* thread_local_scoped_config = @@ -487,8 +482,8 @@ void ScopedRdsConfigSubscription::onDemandRdsUpdate( std::shared_ptr scope_key, Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallback&& route_config_updated_cb, std::weak_ptr weak_subscription) { - factory_context_.dispatcher().post([this, &thread_local_dispatcher, scope_key, - route_config_updated_cb, weak_subscription]() { + factory_context_.mainThreadDispatcher().post([this, &thread_local_dispatcher, scope_key, + route_config_updated_cb, weak_subscription]() { // If the subscription has been destroyed, return immediately. 
if (!weak_subscription.lock()) { thread_local_dispatcher.post([route_config_updated_cb] { route_config_updated_cb(false); }); diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index b0e5690bee8e8..d21d812741e3c 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -217,8 +217,7 @@ class ScopedRdsConfigSubscription DeltaConfigSubscriptionInstance::onConfigUpdateFailed(); } // Propagate RDS updates to ScopeConfigImpl in workers. - void onRdsConfigUpdate(const std::string& scope_name, - RdsRouteConfigSubscription& rds_subscription); + void onRdsConfigUpdate(const std::string& scope_name, ConfigConstSharedPtr new_rds_config); // ScopedRouteInfo by scope name. ScopedRouteMap scoped_route_map_; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 35cbfc8f28fcf..e90be591d22e4 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -80,6 +80,10 @@ UpstreamRequest::~UpstreamRequest() { // Allows for testing. per_try_timeout_->disableTimer(); } + if (per_try_idle_timeout_ != nullptr) { + // Allows for testing. 
+ per_try_idle_timeout_->disableTimer(); + } if (max_stream_duration_timer_ != nullptr) { max_stream_duration_timer_->disableTimer(); } @@ -136,6 +140,7 @@ void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& head void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + resetPerTryIdleTimer(); addResponseHeadersSize(headers->byteSize()); // We drop 1xx other than 101 on the floor; 101 upgrade headers need to be passed to the client as @@ -177,6 +182,7 @@ void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool e void UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + resetPerTryIdleTimer(); maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); parent_.onUpstreamData(data, *this, end_stream); @@ -331,6 +337,12 @@ void UpstreamRequest::resetStream() { } } +void UpstreamRequest::resetPerTryIdleTimer() { + if (per_try_idle_timeout_ != nullptr) { + per_try_idle_timeout_->enableTimer(parent_.timeout().per_try_idle_timeout_); + } +} + void UpstreamRequest::setupPerTryTimeout() { ASSERT(!per_try_timeout_); if (parent_.timeout().per_try_timeout_.count() > 0) { @@ -338,6 +350,19 @@ void UpstreamRequest::setupPerTryTimeout() { parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryTimeout(); }); per_try_timeout_->enableTimer(parent_.timeout().per_try_timeout_); } + + ASSERT(!per_try_idle_timeout_); + if (parent_.timeout().per_try_idle_timeout_.count() > 0) { + per_try_idle_timeout_ = + parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryIdleTimeout(); }); + resetPerTryIdleTimer(); + } +} + +void UpstreamRequest::onPerTryIdleTimeout() { + ENVOY_STREAM_LOG(debug, "upstream per try idle timeout", *parent_.callbacks()); + 
stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + parent_.onPerTryIdleTimeout(*this); } void UpstreamRequest::onPerTryTimeout() { @@ -369,12 +394,7 @@ void UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, reset_reason = Http::StreamResetReason::ConnectionFailure; break; case ConnectionPool::PoolFailureReason::Timeout: - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure")) { - reset_reason = Http::StreamResetReason::ConnectionFailure; - } else { - reset_reason = Http::StreamResetReason::LocalReset; - } + reset_reason = Http::StreamResetReason::ConnectionFailure; } // Mimic an upstream reset. diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index f0b07e8bdacd5..98f214c0d277d 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -46,7 +46,6 @@ class UpstreamRequest : public Logger::Loggable, void resetStream(); void setupPerTryTimeout(); - void onPerTryTimeout(); void maybeEndDecode(bool end_stream); void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host); @@ -132,11 +131,15 @@ class UpstreamRequest : public Logger::Loggable, void addResponseHeadersSize(uint64_t size) { response_headers_size_ = response_headers_size_.value_or(0) + size; } + void resetPerTryIdleTimer(); + void onPerTryTimeout(); + void onPerTryIdleTimeout(); RouterFilterInterface& parent_; std::unique_ptr conn_pool_; bool grpc_rq_success_deferred_; Event::TimerPtr per_try_timeout_; + Event::TimerPtr per_try_idle_timeout_; std::unique_ptr upstream_; absl::optional deferred_reset_reason_; Buffer::InstancePtr buffered_request_body_; diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 50cba15839226..00658f8279fd0 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -55,7 +55,6 
@@ constexpr const char* runtime_features[] = { // Enabled "envoy.reloadable_features.test_feature_true", // Begin alphabetically sorted section. - "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.add_and_validate_scheme_header", "envoy.reloadable_features.allow_response_for_timeout", "envoy.reloadable_features.check_unsupported_typed_per_filter_config", @@ -86,15 +85,15 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.preserve_downstream_scheme", "envoy.reloadable_features.remove_forked_chromium_url", "envoy.reloadable_features.require_strict_1xx_and_204_response_headers", - "envoy.reloadable_features.return_502_for_upstream_protocol_errors", "envoy.reloadable_features.send_strict_1xx_and_204_response_headers", "envoy.reloadable_features.strip_port_from_connect", "envoy.reloadable_features.treat_host_like_authority", - "envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure", + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place", "envoy.reloadable_features.udp_per_event_loop_read_limit", "envoy.reloadable_features.unquote_log_string_values", "envoy.reloadable_features.upstream_host_weight_change_causes_rebuild", "envoy.reloadable_features.use_observable_cluster_name", + "envoy.reloadable_features.validate_connect", "envoy.reloadable_features.vhds_heartbeats", "envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "envoy.reloadable_features.upstream_http2_flood_checks", @@ -113,8 +112,6 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { - // v2 is fatal-by-default. - "envoy.test_only.broken_in_production.enable_deprecated_v2_api", // TODO(asraa) flip to true in a separate PR to enable the new JSON by default. 
"envoy.reloadable_features.remove_legacy_json", // Sentinel and test flag. diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index f2ed3301ab61a..110e5acce70b6 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -140,9 +140,10 @@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider Config::Utility::checkLocalInfo("TlsCertificateSdsApi", secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } TlsCertificateSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -226,9 +227,10 @@ class CertificateValidationContextSdsApi : public SdsApi, secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } CertificateValidationContextSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, const std::string& sds_config_name, @@ -320,9 +322,10 @@ class TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, 
secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } TlsSessionTicketKeysSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -392,9 +395,10 @@ class GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider { Config::Utility::checkLocalInfo("GenericSecretSdsApi", secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } GenericSecretSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, diff --git a/source/common/signal/signal_action.cc b/source/common/signal/signal_action.cc index aec06302ef8f6..a771f485f8fd8 100644 --- a/source/common/signal/signal_action.cc +++ b/source/common/signal/signal_action.cc @@ -54,12 +54,16 @@ void SignalAction::sigHandler(int sig, siginfo_t* info, void* context) { } void SignalAction::installSigHandlers() { + // sigaltstack and backtrace() are incompatible on Apple platforms + // https://reviews.llvm.org/D28265 +#if !defined(__APPLE__) stack_t stack; stack.ss_sp = altstack_ + guard_size_; // Guard page at one end ... stack.ss_size = altstack_size_; // ... 
guard page at the other stack.ss_flags = 0; RELEASE_ASSERT(sigaltstack(&stack, &previous_altstack_) == 0, ""); +#endif // Make sure VersionInfo::version() is initialized so we don't allocate std::string in signal // handlers. @@ -78,13 +82,11 @@ void SignalAction::installSigHandlers() { } void SignalAction::removeSigHandlers() { -#if defined(__APPLE__) - // ss_flags contains SS_DISABLE, but Darwin still checks the size, contrary to the man page - if (previous_altstack_.ss_size < MINSIGSTKSZ) { - previous_altstack_.ss_size = MINSIGSTKSZ; - } -#endif +// sigaltstack and backtrace() are incompatible on Apple platforms +// https://reviews.llvm.org/D28265 +#if !defined(__APPLE__) RELEASE_ASSERT(sigaltstack(&previous_altstack_, nullptr) == 0, ""); +#endif int hidx = 0; for (const auto& sig : FATAL_SIGS) { diff --git a/source/common/signal/signal_action.h b/source/common/signal/signal_action.h index 79bc2ab30c765..82ae486cc09f3 100644 --- a/source/common/signal/signal_action.h +++ b/source/common/signal/signal_action.h @@ -128,7 +128,11 @@ class SignalAction : NonCopyable { void unmapStackMemory(); char* altstack_{}; std::array previous_handlers_; +// sigaltstack and backtrace() are incompatible on Apple platforms +// https://reviews.llvm.org/D28265 +#if !defined(__APPLE__) stack_t previous_altstack_; +#endif }; } // namespace Envoy diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index f71156c712426..7e3ef609fba5b 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -24,6 +24,18 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "custom_stat_namespaces_lib", + srcs = ["custom_stat_namespaces_impl.cc"], + hdrs = ["custom_stat_namespaces_impl.h"], + deps = [ + "//envoy/stats:custom_stat_namespaces_interface", + "//source/common/common:assert_lib", + "//source/common/common:macros", + "//source/common/common:thread_lib", + ], +) + envoy_cc_library( name = "histogram_lib", srcs = ["histogram_impl.cc"], diff --git 
a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 4464f41a344e6..9e8a37705e4d2 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -65,8 +65,8 @@ void AllocatorImpl::debugPrint() { // which we need in order to clean up the counter and gauge maps in that class // when they are destroyed. // -// We implement the RefcountInterface API, using 16 bits that would otherwise be -// wasted in the alignment padding next to flags_. +// We implement the RefcountInterface API to avoid weak counter and destructor overhead in +// shared_ptr. template class StatsSharedImpl : public MetricImpl { public: StatsSharedImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name, diff --git a/source/common/stats/custom_stat_namespaces_impl.cc b/source/common/stats/custom_stat_namespaces_impl.cc new file mode 100644 index 0000000000000..89c95344fc2b5 --- /dev/null +++ b/source/common/stats/custom_stat_namespaces_impl.cc @@ -0,0 +1,33 @@ +#include "source/common/stats/custom_stat_namespaces_impl.h" + +#include "source/common/common/assert.h" +#include "source/common/common/thread.h" + +namespace Envoy { +namespace Stats { + +bool CustomStatNamespacesImpl::registered(const absl::string_view name) const { + ASSERT(Thread::MainThread::isMainOrTestThread()); + return namespaces_.find(name) != namespaces_.end(); +} + +void CustomStatNamespacesImpl::registerStatNamespace(const absl::string_view name) { + ASSERT(Thread::MainThread::isMainOrTestThread()); + namespaces_.insert(std::string(name)); +}; + +absl::optional +CustomStatNamespacesImpl::stripRegisteredPrefix(const absl::string_view stat_name) const { + ASSERT(Thread::MainThread::isMainOrTestThread()); + if (!namespaces_.empty()) { + const auto pos = stat_name.find_first_of('.'); + if (pos != std::string::npos && registered(stat_name.substr(0, pos))) { + // Trim the custom namespace. 
+ return stat_name.substr(pos + 1); + } + } + return absl::nullopt; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/custom_stat_namespaces_impl.h b/source/common/stats/custom_stat_namespaces_impl.h new file mode 100644 index 0000000000000..0f90b28d5ba43 --- /dev/null +++ b/source/common/stats/custom_stat_namespaces_impl.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/stats/custom_stat_namespaces.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Stats { + +class CustomStatNamespacesImpl : public CustomStatNamespaces { +public: + ~CustomStatNamespacesImpl() override = default; + + // CustomStatNamespaces + bool registered(const absl::string_view name) const override; + void registerStatNamespace(const absl::string_view name) override; + absl::optional + stripRegisteredPrefix(const absl::string_view stat_name) const override; + +private: + absl::flat_hash_set namespaces_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 0e88476e5e47f..2b79daa7ee86b 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -539,14 +539,6 @@ Counter& ThreadLocalStoreImpl::ScopeImpl::counterFromStatNameWithTags( } // Determine the final name based on the prefix and the passed name. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer - // to a temporary, and address sanitization errors would follow. Instead we - // must do a find() first, using the value if it succeeds. If it fails, then - // after we construct the stat we can insert it into the required maps. This - // strategy costs an extra hash lookup for each miss, but saves time - // re-copying the string and significant memory overhead. 
TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); Stats::StatName final_stat_name = joiner.nameWithTags(); @@ -600,12 +592,6 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gaugeFromStatNameWithTags( // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to - // a temporary, and address sanitization errors would follow. Instead we must - // do a find() first, using that if it succeeds. If it fails, then after we - // construct the stat we can insert it into the required maps. TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); StatName final_stat_name = joiner.nameWithTags(); @@ -617,7 +603,7 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gaugeFromStatNameWithTags( StatRefMap* tls_cache = nullptr; StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_cache_) { - TlsCacheEntry& entry = parent_.tlsCache().scope_cache_[this->scope_id_]; + TlsCacheEntry& entry = parent_.tlsCache().insertScope(this->scope_id_); tls_cache = &entry.gauges_; tls_rejected_stats = &entry.rejected_stats_; } @@ -642,13 +628,6 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to - // a temporary, and address sanitization errors would follow. Instead we must - // do a find() first, using that if it succeeds. 
If it fails, then after we - // construct the stat we can insert it into the required maps. - TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); StatName final_stat_name = joiner.nameWithTags(); @@ -660,7 +639,7 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( StatNameHashMap* tls_cache = nullptr; StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_cache_) { - TlsCacheEntry& entry = parent_.tlsCache().scope_cache_[this->scope_id_]; + TlsCacheEntry& entry = parent_.tlsCache().insertScope(this->scope_id_); tls_cache = &entry.parent_histograms_; auto iter = tls_cache->find(final_stat_name); if (iter != tls_cache->end()) { @@ -720,14 +699,6 @@ TextReadout& ThreadLocalStoreImpl::ScopeImpl::textReadoutFromStatNameWithTags( } // Determine the final name based on the prefix and the passed name. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer - // to a temporary, and address sanitization errors would follow. Instead we - // must do a find() first, using the value if it succeeds. If it fails, then - // after we construct the stat we can insert it into the required maps. This - // strategy costs an extra hash lookup for each miss, but saves time - // re-copying the string and significant memory overhead. 
TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); Stats::StatName final_stat_name = joiner.nameWithTags(); diff --git a/source/common/stats/utility.cc b/source/common/stats/utility.cc index a4e5574713e23..69fe60d516ba1 100644 --- a/source/common/stats/utility.cc +++ b/source/common/stats/utility.cc @@ -68,11 +68,6 @@ struct ElementVisitor { namespace Utility { -ScopePtr scopeFromElements(Scope& scope, const ElementVec& elements) { - ElementVisitor visitor(scope.symbolTable(), elements); - return scope.scopeFromStatName(visitor.statName()); -} - ScopePtr scopeFromStatNames(Scope& scope, const StatNameVec& elements) { SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); return scope.scopeFromStatName(StatName(joined.get())); diff --git a/source/common/stats/utility.h b/source/common/stats/utility.h index 2ef817b61c171..5c067f628f103 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -63,22 +63,6 @@ std::string sanitizeStatsName(absl::string_view name); */ absl::optional findTag(const Metric& metric, StatName find_tag_name); -/** - * Creates a nested scope from a vector of tokens which are used to create the - * name. The tokens can be specified as DynamicName or StatName. For - * tokens specified as DynamicName, a dynamic StatName will be created. See - * https://github.com/envoyproxy/envoy/blob/main/source/docs/stats.md#dynamic-stat-tokens - * for more detail on why symbolic StatNames are preferred when possible. - * - * See also scopeFromStatNames, which is slightly faster but does not allow - * passing DynamicName(string)s as names. - * - * @param scope The scope in which to create the counter. - * @param elements The vector of mixed DynamicName and StatName - * @return A scope named using the joined elements. 
- */ -ScopePtr scopeFromElements(Scope& scope, const ElementVec& elements); - /** * Creates a nested scope from a vector of StatNames which are used to create the * name. diff --git a/source/common/stream_info/BUILD b/source/common/stream_info/BUILD index 8ed41ec327106..60f7ac62f3c02 100644 --- a/source/common/stream_info/BUILD +++ b/source/common/stream_info/BUILD @@ -49,3 +49,11 @@ envoy_cc_library( "//envoy/stream_info:uint32_accessor_interface", ], ) + +envoy_cc_library( + name = "upstream_address_lib", + hdrs = ["upstream_address.h"], + deps = [ + "//envoy/stream_info:filter_state_interface", + ], +) diff --git a/source/common/stream_info/upstream_address.h b/source/common/stream_info/upstream_address.h new file mode 100644 index 0000000000000..eb09a641ac9bf --- /dev/null +++ b/source/common/stream_info/upstream_address.h @@ -0,0 +1,24 @@ +#pragma once + +#include "envoy/network/address.h" +#include "envoy/stream_info/filter_state.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace StreamInfo { + +/* + * A FilterState object that wraps a network address shared pointer. 
+ */ +class UpstreamAddress : public FilterState::Object { +public: + static const std::string& key() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.stream.upstream_address"); + } + + Network::Address::InstanceConstSharedPtr address_; +}; + +} // namespace StreamInfo +} // namespace Envoy diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 5cb4675975b97..b22fa5e956e5f 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -37,51 +37,8 @@ const std::string& PerConnectionCluster::key() { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.tcp_proxy.cluster"); } -Config::RouteImpl::RouteImpl( - const Config& parent, - const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute& - config) - : parent_(parent) { - cluster_name_ = config.cluster(); - - source_ips_ = Network::Address::IpList(config.source_ip_list()); - destination_ips_ = Network::Address::IpList(config.destination_ip_list()); - - if (!config.source_ports().empty()) { - Network::Utility::parsePortRangeList(config.source_ports(), source_port_ranges_); - } - - if (!config.destination_ports().empty()) { - Network::Utility::parsePortRangeList(config.destination_ports(), destination_port_ranges_); - } -} - -bool Config::RouteImpl::matches(Network::Connection& connection) const { - if (!source_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.connectionInfoProvider().remoteAddress(), - source_port_ranges_)) { - return false; - } - - if (!source_ips_.empty() && - !source_ips_.contains(*connection.connectionInfoProvider().remoteAddress())) { - return false; - } - - if (!destination_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.connectionInfoProvider().localAddress(), - destination_port_ranges_)) { - return false; - } - - if (!destination_ips_.empty() && - !destination_ips_.contains(*connection.connectionInfoProvider().localAddress())) { - return false; - } - - 
// if we made it past all checks, the route matches - return true; -} +Config::SimpleRouteImpl::SimpleRouteImpl(const Config& parent, absl::string_view cluster_name) + : parent_(parent), cluster_name_(cluster_name) {} Config::WeightedClusterEntry::WeightedClusterEntry( const Config& parent, const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy:: @@ -139,10 +96,7 @@ Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProx }); if (!config.cluster().empty()) { - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute - default_route; - default_route.set_cluster(config.cluster()); - routes_.emplace_back(std::make_shared(*this, default_route)); + default_route_ = std::make_shared(*this, config.cluster()); } if (config.has_metadata_match()) { @@ -156,9 +110,8 @@ Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProx } } - // Weighted clusters will be enabled only if both the default cluster and - // deprecated v1 routes are absent. - if (routes_.empty() && config.has_weighted_clusters()) { + // Weighted clusters will be enabled only if the default cluster is absent. 
+ if (default_route_ == nullptr && config.has_weighted_clusters()) { total_cluster_weight_ = 0; for (const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::WeightedCluster:: ClusterWeight& cluster_desc : config.weighted_clusters().clusters()) { @@ -186,16 +139,11 @@ RouteConstSharedPtr Config::getRegularRouteFromEntries(Network::Connection& conn connection.streamInfo().filterState()->getDataReadOnly( PerConnectionCluster::key()); - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute - per_connection_route; - per_connection_route.set_cluster(per_connection_cluster.value()); - return std::make_shared(*this, per_connection_route); + return std::make_shared(*this, per_connection_cluster.value()); } - for (const RouteConstSharedPtr& route : routes_) { - if (route->matches(connection)) { - return route; - } + if (default_route_ != nullptr) { + return default_route_; } // no match, no more routes to try diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index b157db0e1b845..3b64e58f5add5 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -161,24 +161,17 @@ class Config { const Network::HashPolicy* hashPolicy() { return hash_policy_.get(); } private: - struct RouteImpl : public Route { - RouteImpl( - const Config& parent, - const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute& - config); + struct SimpleRouteImpl : public Route { + SimpleRouteImpl(const Config& parent, absl::string_view cluster_name); // Route - bool matches(Network::Connection& connection) const override; + bool matches(Network::Connection&) const override { return true; } const std::string& clusterName() const override { return cluster_name_; } const Router::MetadataMatchCriteria* metadataMatchCriteria() const override { return parent_.metadataMatchCriteria(); } const Config& parent_; - Network::Address::IpList source_ips_; - 
Network::PortRangeList source_port_ranges_; - Network::Address::IpList destination_ips_; - Network::PortRangeList destination_port_ranges_; std::string cluster_name_; }; @@ -208,7 +201,7 @@ class Config { }; using WeightedClusterEntryConstSharedPtr = std::shared_ptr; - std::vector routes_; + RouteConstSharedPtr default_route_; std::vector weighted_clusters_; uint64_t total_cluster_weight_; std::vector access_logs_; diff --git a/source/common/tcp_proxy/upstream.cc b/source/common/tcp_proxy/upstream.cc index 8a66a0ad8161b..8982729206000 100644 --- a/source/common/tcp_proxy/upstream.cc +++ b/source/common/tcp_proxy/upstream.cc @@ -196,8 +196,14 @@ HttpConnPool::HttpConnPool(Upstream::ThreadLocalCluster& thread_local_cluster, Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks, Http::CodecType type) : config_(config), type_(type), upstream_callbacks_(upstream_callbacks) { - conn_pool_data_ = thread_local_cluster.httpConnPool(Upstream::ResourcePriority::Default, - absl::nullopt, context); + absl::optional protocol; + if (type_ == Http::CodecType::HTTP3) { + protocol = Http::Protocol::Http3; + } else if (type_ == Http::CodecType::HTTP2) { + protocol = Http::Protocol::Http2; + } + conn_pool_data_ = + thread_local_cluster.httpConnPool(Upstream::ResourcePriority::Default, protocol, context); } HttpConnPool::~HttpConnPool() { diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index 3f2bfd9c67785..017257b5bcaa0 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -53,8 +53,7 @@ class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callba Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks, Http::CodecType type); ~HttpConnPool() override; - // HTTP/3 upstreams are not supported at the moment. 
- bool valid() const { return conn_pool_data_.has_value() && type_ <= Http::CodecType::HTTP2; } + bool valid() const { return conn_pool_data_.has_value(); } // GenericConnPool void newStream(GenericConnectionPoolCallbacks& callbacks) override; diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index baeade40f0c27..99f473a7cc072 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -16,14 +16,13 @@ namespace ThreadLocal { thread_local InstanceImpl::ThreadLocalData InstanceImpl::thread_local_data_; InstanceImpl::~InstanceImpl() { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(shutdown_); thread_local_data_.data_.clear(); - Thread::MainThread::clear(); } SlotPtr InstanceImpl::allocateSlot() { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); if (free_slot_indexes_.empty()) { @@ -92,7 +91,7 @@ void InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb) { } void InstanceImpl::SlotImpl::set(InitializeCb cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!parent_.shutdown_); for (Event::Dispatcher& dispatcher : parent_.registered_threads_) { @@ -106,7 +105,7 @@ void InstanceImpl::SlotImpl::set(InitializeCb cb) { } void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_thread) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); if (main_thread) { @@ -120,7 +119,7 @@ void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_threa } void InstanceImpl::removeSlot(uint32_t slot) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); // When shutting down, we do not post slot removals to other threads. 
This is because the other // threads have already shut down and the dispatcher is no longer alive. There is also no reason @@ -147,7 +146,7 @@ void InstanceImpl::removeSlot(uint32_t slot) { } void InstanceImpl::runOnAllThreads(Event::PostCb cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); for (Event::Dispatcher& dispatcher : registered_threads_) { @@ -159,7 +158,7 @@ void InstanceImpl::runOnAllThreads(Event::PostCb cb) { } void InstanceImpl::runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_complete_cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); // Handle main thread first so that when the last worker thread wins, we could just call the // all_threads_complete_cb method. Parallelism of main thread execution is being traded off @@ -186,7 +185,7 @@ void InstanceImpl::setThreadLocal(uint32_t index, ThreadLocalObjectSharedPtr obj } void InstanceImpl::shutdownGlobalThreading() { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); shutdown_ = true; } diff --git a/source/common/thread_local/thread_local_impl.h b/source/common/thread_local/thread_local_impl.h index f7825b66e151f..26c08dfce3321 100644 --- a/source/common/thread_local/thread_local_impl.h +++ b/source/common/thread_local/thread_local_impl.h @@ -19,7 +19,6 @@ namespace ThreadLocal { */ class InstanceImpl : Logger::Loggable, public NonCopyable, public Instance { public: - InstanceImpl() { Thread::MainThread::initMainThread(); } ~InstanceImpl() override; // ThreadLocal::Instance @@ -78,6 +77,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub static thread_local ThreadLocalData thread_local_data_; + Thread::MainThread main_thread_; std::vector slots_; // A list of index of freed slots. 
std::list free_slot_indexes_; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index cca323285187e..36fe37d19be7a 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -87,6 +87,7 @@ envoy_cc_library( "//source/common/upstream:priority_conn_pool_map_impl_lib", "//source/common/upstream:upstream_lib", "//source/common/quic:quic_stat_names_lib", + "//source/server:factory_context_base_impl_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -106,6 +107,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "retry_factory_lib", + hdrs = ["retry_factory.h"], + deps = [ + "//envoy/upstream:retry_interface", + ], +) + envoy_cc_library( name = "conn_pool_map", hdrs = ["conn_pool_map.h"], @@ -408,6 +417,7 @@ envoy_cc_library( hdrs = ["eds.h"], deps = [ ":cluster_factory_lib", + ":leds_lib", ":upstream_includes", "//envoy/config:grpc_mux_interface", "//envoy/config:subscription_factory_interface", @@ -499,6 +509,7 @@ envoy_cc_library( "//envoy/network:listen_socket_interface", "//envoy/ssl:context_interface", "//envoy/upstream:health_checker_interface", + "//source/common/common:dns_utils_lib", "//source/common/common:enum_to_int", "//source/common/common:thread_lib", "//source/common/common:utility_lib", diff --git a/source/common/upstream/cds_api_helper.cc b/source/common/upstream/cds_api_helper.cc index ddfb2f6a78d08..4511391f645ab 100644 --- a/source/common/upstream/cds_api_helper.cc +++ b/source/common/upstream/cds_api_helper.cc @@ -17,10 +17,13 @@ std::vector CdsApiHelper::onConfigUpdate(const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { - Config::ScopedResume maybe_resume_eds; + Config::ScopedResume maybe_resume_eds_leds; if (cm_.adsMux()) { - const auto type_url = Config::getTypeUrl(); - maybe_resume_eds = 
cm_.adsMux()->pause(type_url); + // A cluster update pauses sending EDS and LEDS requests. + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + maybe_resume_eds_leds = cm_.adsMux()->pause({eds_type_url, leds_type_url}); } ENVOY_LOG(info, "{}: add {} cluster(s), remove {} cluster(s)", name_, added_resources.size(), diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 2e90917e28b56..f3f2f2f06e77f 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -113,7 +113,7 @@ ClusterFactoryImplBase::selectDnsResolver(const envoy::config::cluster::v3::Clus resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); } } - return context.dispatcher().createDnsResolver(resolvers, dns_resolver_options); + return context.mainThreadDispatcher().createDnsResolver(resolvers, dns_resolver_options); } return context.dnsResolver(); @@ -127,8 +127,8 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste transport_factory_context = std::make_unique( context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), - context.localInfo(), context.dispatcher(), context.stats(), - context.singletonManager(), context.tls(), context.messageValidationVisitor(), + context.localInfo(), context.mainThreadDispatcher(), context.stats(), + context.singletonManager(), context.threadLocal(), context.messageValidationVisitor(), context.api(), context.options()); std::pair new_cluster_pair = @@ -141,13 +141,13 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste } else { new_cluster_pair.first->setHealthChecker(HealthCheckerFactory::create( cluster.health_checks()[0], *new_cluster_pair.first, context.runtime(), - context.dispatcher(), context.logManager(), context.messageValidationVisitor(), + context.mainThreadDispatcher(), 
context.logManager(), context.messageValidationVisitor(), context.api())); } } new_cluster_pair.first->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( - *new_cluster_pair.first, cluster, context.dispatcher(), context.runtime(), + *new_cluster_pair.first, cluster, context.mainThreadDispatcher(), context.runtime(), context.outlierEventLogger())); new_cluster_pair.first->setTransportFactoryContext(std::move(transport_factory_context)); diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index a1a2020cc834e..1ff81b8503285 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -70,13 +70,13 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { ClusterManager& clusterManager() override { return cluster_manager_; } Stats::Store& stats() override { return stats_; } - ThreadLocal::SlotAllocator& tls() override { return tls_; } + ThreadLocal::SlotAllocator& threadLocal() override { return tls_; } Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } Runtime::Loader& runtime() override { return runtime_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } AccessLog::AccessLogManager& logManager() override { return log_manager_; } - const LocalInfo::LocalInfo& localInfo() override { return local_info_; } + const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } const Server::Options& options() override { return options_; } Server::Admin& admin() override { return admin_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 6e1f4e1b73587..31913283e2d4a 100644 --- 
a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -182,11 +182,12 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // If the first CDS response doesn't have any primary cluster, ClusterLoadAssignment // should be already paused by CdsApiImpl::onConfigUpdate(). Need to check that to // avoid double pause ClusterLoadAssignment. - Config::ScopedResume maybe_resume_eds; + Config::ScopedResume maybe_resume_eds_leds; if (cm_.adsMux()) { - const auto type_url = + const auto eds_type_url = Config::getTypeUrl(); - maybe_resume_eds = cm_.adsMux()->pause(type_url); + const auto leds_type_url = Config::getTypeUrl(); + maybe_resume_eds_leds = cm_.adsMux()->pause({eds_type_url, leds_type_url}); } initializeSecondaryClusters(); } @@ -971,6 +972,16 @@ void ClusterManagerImpl::drainConnections() { }); } +void ClusterManagerImpl::checkActiveStaticCluster(const std::string& cluster) { + const auto& it = active_clusters_.find(cluster); + if (it == active_clusters_.end()) { + throw EnvoyException(fmt::format("Unknown gRPC client cluster '{}'", cluster)); + } + if (it->second->added_via_api_) { + throw EnvoyException(fmt::format("gRPC client cluster '{}' is not static", cluster)); + } +} + void ClusterManagerImpl::postThreadLocalRemoveHosts(const Cluster& cluster, const HostVector& hosts_removed) { tls_.runOnAllThreads([name = cluster.info()->name(), @@ -1329,14 +1340,16 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( cluster->lbType(), priority_set_, parent_.local_priority_set_, cluster->stats(), cluster->statsScope(), parent.parent_.runtime_, parent.parent_.random_, cluster->lbSubsetInfo(), cluster->lbRingHashConfig(), cluster->lbMaglevConfig(), - cluster->lbLeastRequestConfig(), cluster->lbConfig()); + cluster->lbRoundRobinConfig(), cluster->lbLeastRequestConfig(), cluster->lbConfig(), + parent_.thread_local_dispatcher_.timeSource()); } else { switch (cluster->lbType()) { case 
LoadBalancerType::LeastRequest: { ASSERT(lb_factory_ == nullptr); lb_ = std::make_unique( priority_set_, parent_.local_priority_set_, cluster->stats(), parent.parent_.runtime_, - parent.parent_.random_, cluster->lbConfig(), cluster->lbLeastRequestConfig()); + parent.parent_.random_, cluster->lbConfig(), cluster->lbLeastRequestConfig(), + parent.thread_local_dispatcher_.timeSource()); break; } case LoadBalancerType::Random: { @@ -1348,9 +1361,10 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( } case LoadBalancerType::RoundRobin: { ASSERT(lb_factory_ == nullptr); - lb_ = std::make_unique(priority_set_, parent_.local_priority_set_, - cluster->stats(), parent.parent_.runtime_, - parent.parent_.random_, cluster->lbConfig()); + lb_ = std::make_unique( + priority_set_, parent_.local_priority_set_, cluster->stats(), parent.parent_.runtime_, + parent.parent_.random_, cluster->lbConfig(), cluster->lbRoundRobinConfig(), + parent.thread_local_dispatcher_.timeSource()); break; } case LoadBalancerType::ClusterProvided: @@ -1633,8 +1647,9 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::tcpConnPoolIsIdle( ClusterManagerPtr ProdClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { return ClusterManagerPtr{new ClusterManagerImpl( - bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_, - admin_, validation_context_, api_, http_context_, grpc_context_, router_context_)}; + bootstrap, *this, stats_, tls_, context_.runtime(), context_.localInfo(), log_manager_, + context_.mainThreadDispatcher(), context_.admin(), validation_context_, context_.api(), + http_context_, grpc_context_, router_context_)}; } Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( @@ -1645,21 +1660,20 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( const Network::ConnectionSocket::OptionsSharedPtr& options, const 
Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, TimeSource& source, ClusterConnectivityState& state) { - if (protocols.size() == 3 && runtime_.snapshot().featureEnabled("upstream.use_http3", 100)) { + if (protocols.size() == 3 && + context_.runtime().snapshot().featureEnabled("upstream.use_http3", 100)) { ASSERT(contains(protocols, {Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3})); - Http::AlternateProtocolsCacheSharedPtr alternate_protocols_cache; - if (alternate_protocol_options.has_value()) { - alternate_protocols_cache = - alternate_protocols_cache_manager_->getCache(alternate_protocol_options.value()); - } + ASSERT(alternate_protocol_options.has_value()); #ifdef ENVOY_ENABLE_QUIC - // TODO(RyanTheOptimist): Plumb an actual alternate protocols cache. + Http::AlternateProtocolsCacheSharedPtr alternate_protocols_cache = + alternate_protocols_cache_manager_->getCache(alternate_protocol_options.value(), + dispatcher); Envoy::Http::ConnectivityGrid::ConnectivityOptions coptions{protocols}; return std::make_unique( - dispatcher, api_.randomGenerator(), host, priority, options, transport_socket_options, - state, source, alternate_protocols_cache, std::chrono::milliseconds(300), coptions, - quic_stat_names_, stats_); + dispatcher, context_.api().randomGenerator(), host, priority, options, + transport_socket_options, state, source, alternate_protocols_cache, + std::chrono::milliseconds(300), coptions, quic_stat_names_, stats_); #else // Should be blocked by configuration checking at an earlier point. 
NOT_REACHED_GCOVR_EXCL_LINE; @@ -1667,20 +1681,20 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( } if (protocols.size() >= 2) { ASSERT(contains(protocols, {Http::Protocol::Http11, Http::Protocol::Http2})); - return std::make_unique(dispatcher, api_.randomGenerator(), host, - priority, options, - transport_socket_options, state); + return std::make_unique( + dispatcher, context_.api().randomGenerator(), host, priority, options, + transport_socket_options, state); } if (protocols.size() == 1 && protocols[0] == Http::Protocol::Http2 && - runtime_.snapshot().featureEnabled("upstream.use_http2", 100)) { - return Http::Http2::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, - options, transport_socket_options, state); + context_.runtime().snapshot().featureEnabled("upstream.use_http2", 100)) { + return Http::Http2::allocateConnPool(dispatcher, context_.api().randomGenerator(), host, + priority, options, transport_socket_options, state); } if (protocols.size() == 1 && protocols[0] == Http::Protocol::Http3 && - runtime_.snapshot().featureEnabled("upstream.use_http3", 100)) { + context_.runtime().snapshot().featureEnabled("upstream.use_http3", 100)) { #ifdef ENVOY_ENABLE_QUIC - return Http::Http3::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, - options, transport_socket_options, state, source, + return Http::Http3::allocateConnPool(dispatcher, context_.api().randomGenerator(), host, + priority, options, transport_socket_options, state, source, quic_stat_names_, stats_); #else UNREFERENCED_PARAMETER(source); @@ -1689,8 +1703,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( #endif } ASSERT(protocols.size() == 1 && protocols[0] == Http::Protocol::Http11); - return Http::Http1::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, options, - transport_socket_options, state); + return Http::Http1::allocateConnPool(dispatcher, 
context_.api().randomGenerator(), host, priority, + options, transport_socket_options, state); } Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( @@ -1712,12 +1726,12 @@ std::pair ProdClusterManagerFactor const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) { return ClusterFactoryImplBase::create( - cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, - main_thread_dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, - outlier_event_logger, added_via_api, + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, context_.runtime(), + context_.mainThreadDispatcher(), log_manager_, context_.localInfo(), admin_, + singleton_manager_, outlier_event_logger, added_via_api, added_via_api ? validation_context_.dynamicValidationVisitor() : validation_context_.staticValidationVisitor(), - api_, options_); + context_.api(), context_.options()); } CdsApiPtr diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 1540defd97712..30d58cf5d8def 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -36,6 +36,7 @@ #include "source/common/upstream/load_stats_reporter.h" #include "source/common/upstream/priority_conn_pool_map.h" #include "source/common/upstream/upstream_impl.h" +#include "source/server/factory_context_base_impl.h" namespace Envoy { namespace Upstream { @@ -54,14 +55,14 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Http::Context& http_context, Grpc::Context& grpc_context, Router::Context& router_context, AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager, const Server::Options& options, Quic::QuicStatNames& quic_stat_names) - : main_thread_dispatcher_(main_thread_dispatcher), validation_context_(validation_context), - api_(api), 
http_context_(http_context), grpc_context_(grpc_context), - router_context_(router_context), admin_(admin), runtime_(runtime), stats_(stats), tls_(tls), - dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), + : context_(options, main_thread_dispatcher, api, local_info, admin, runtime, + singleton_manager, validation_context.staticValidationVisitor(), stats, tls), + validation_context_(validation_context), http_context_(http_context), + grpc_context_(grpc_context), router_context_(router_context), admin_(admin), stats_(stats), + tls_(tls), dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), local_info_(local_info), secret_manager_(secret_manager), log_manager_(log_manager), - singleton_manager_(singleton_manager), options_(options), quic_stat_names_(quic_stat_names), - alternate_protocols_cache_manager_factory_(singleton_manager, - main_thread_dispatcher.timeSource(), tls_), + singleton_manager_(singleton_manager), quic_stat_names_(quic_stat_names), + alternate_protocols_cache_manager_factory_(singleton_manager, tls_, {context_}), alternate_protocols_cache_manager_(alternate_protocols_cache_manager_factory_.get()) {} // Upstream::ClusterManagerFactory @@ -88,16 +89,15 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { const xds::core::v3::ResourceLocator* cds_resources_locator, ClusterManager& cm) override; Secret::SecretManager& secretManager() override { return secret_manager_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } protected: - Event::Dispatcher& main_thread_dispatcher_; + Server::FactoryContextBaseImpl context_; ProtobufMessage::ValidationContext& validation_context_; - Api::Api& api_; Http::Context& http_context_; Grpc::Context& grpc_context_; Router::Context& router_context_; Server::Admin& admin_; - Runtime::Loader& runtime_; Stats::Store& stats_; ThreadLocal::Instance& tls_; Network::DnsResolverSharedPtr dns_resolver_; @@ -106,7 +106,6 @@ class 
ProdClusterManagerFactory : public ClusterManagerFactory { Secret::SecretManager& secret_manager_; AccessLog::AccessLogManager& log_manager_; Singleton::Manager& singleton_manager_; - const Server::Options& options_; Quic::QuicStatNames& quic_stat_names_; Http::AlternateProtocolsCacheManagerFactoryImpl alternate_protocols_cache_manager_factory_; Http::AlternateProtocolsCacheManagerSharedPtr alternate_protocols_cache_manager_; @@ -319,6 +318,8 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable class ConnPoolMap { /** * @return true if the pools are empty. */ - size_t empty() const; + bool empty() const; /** * Destroys all mapped pools. diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 6df615ce013cf..1eebd6ec82f4f 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -81,7 +81,7 @@ size_t ConnPoolMap::size() const { } template -size_t ConnPoolMap::empty() const { +bool ConnPoolMap::empty() const { return active_pools_.empty(); } diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index b25bdc07e5824..8460a0ef8b829 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -18,14 +18,14 @@ EdsClusterImpl::EdsClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, factory_context.mainThreadDispatcher().timeSource()), Envoy::Config::SubscriptionBase( factory_context.messageValidationVisitor(), "cluster_name"), - local_info_(factory_context.localInfo()), + factory_context_(factory_context), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() ? 
cluster.name() : cluster.eds_cluster_config().service_name()) { - Event::Dispatcher& dispatcher = factory_context.dispatcher(); + Event::Dispatcher& dispatcher = factory_context.mainThreadDispatcher(); assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); }); const auto& eds_config = cluster.eds_cluster_config().eds_config(); if (eds_config.config_source_specifier_case() == @@ -51,17 +51,25 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h priority_state_manager.initializePriorityFor(locality_lb_endpoint); - for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { - auto address = parent_.resolveProtoAddress(lb_endpoint.endpoint().address()); - // When the configuration contains duplicate hosts, only the first one will be retained. - if (all_new_hosts.count(address->asString()) > 0) { - continue; + if (locality_lb_endpoint.has_leds_cluster_locality_config()) { + // The locality uses LEDS, fetch its dynamic data, which must be ready, or otherwise + // the batchUpdate method should not have been called. + const auto& leds_config = locality_lb_endpoint.leds_cluster_locality_config(); + + // The batchUpdate call must be performed after all the endpoints of all localities + // were received. 
+ ASSERT(parent_.leds_localities_.find(leds_config) != parent_.leds_localities_.end() && + parent_.leds_localities_[leds_config]->isUpdated()); + for (const auto& [_, lb_endpoint] : + parent_.leds_localities_[leds_config]->getEndpointsMap()) { + updateLocalityEndpoints(lb_endpoint, locality_lb_endpoint, priority_state_manager, + all_new_hosts); + } + } else { + for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { + updateLocalityEndpoints(lb_endpoint, locality_lb_endpoint, priority_state_manager, + all_new_hosts); } - - priority_state_manager.registerHostForPriority(lb_endpoint.endpoint().hostname(), address, - locality_lb_endpoint, lb_endpoint, - parent_.time_source_); - all_new_hosts.emplace(address->asString()); } } @@ -118,6 +126,23 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h parent_.onPreInitComplete(); } +void EdsClusterImpl::BatchUpdateHelper::updateLocalityEndpoints( + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, + const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, + PriorityStateManager& priority_state_manager, absl::flat_hash_set& all_new_hosts) { + const auto address = parent_.resolveProtoAddress(lb_endpoint.endpoint().address()); + // When the configuration contains duplicate hosts, only the first one will be retained. 
+ const auto address_as_string = address->asString(); + if (all_new_hosts.count(address_as_string) > 0) { + return; + } + + priority_state_manager.registerHostForPriority(lb_endpoint.endpoint().hostname(), address, + locality_lb_endpoint, lb_endpoint, + parent_.time_source_); + all_new_hosts.emplace(address_as_string); +} + void EdsClusterImpl::onConfigUpdate(const std::vector& resources, const std::string&) { if (!validateUpdateSize(resources.size())) { @@ -130,6 +155,17 @@ void EdsClusterImpl::onConfigUpdate(const std::vector 0) { + throw EnvoyException(fmt::format( + "A ClusterLoadAssignment for cluster {} cannot include both LEDS (resource: {}) and a " + "list of endpoints.", + cluster_name_, locality.leds_cluster_locality_config().leds_collection_name())); + } + } // Disable timer (if enabled) as we have received new assignment. if (assignment_timeout_->enabled()) { @@ -144,7 +180,69 @@ void EdsClusterImpl::onConfigUpdate(const std::vectorenableTimer(std::chrono::milliseconds(stale_after_ms)); } - BatchUpdateHelper helper(*this, cluster_load_assignment); + // Pause LEDS messages until the EDS config is finished processing. + Config::ScopedResume maybe_resume_leds; + if (factory_context_.clusterManager().adsMux()) { + const auto type_url = Config::getTypeUrl(); + maybe_resume_leds = factory_context_.clusterManager().adsMux()->pause(type_url); + } + + // Compare the current set of LEDS localities (localities using LEDS) to the one received in the + // update. A LEDS locality can either be added, removed, or kept. If it is added we add a + // subscription to it, and if it is removed we delete the subscription. + LedsConfigSet cla_leds_configs; + + for (const auto& locality : cluster_load_assignment.endpoints()) { + if (locality.has_leds_cluster_locality_config()) { + cla_leds_configs.emplace(locality.leds_cluster_locality_config()); + } + } + + // Remove the LEDS localities that are not needed anymore. 
+ absl::erase_if(leds_localities_, [&cla_leds_configs](const auto& item) { + auto const& [leds_config, _] = item; + // Returns true if the leds_config isn't in the cla_leds_configs + return cla_leds_configs.find(leds_config) == cla_leds_configs.end(); + }); + + // In case LEDS is used, store the cluster load assignment as a field + // (optimize for no-copy). + envoy::config::endpoint::v3::ClusterLoadAssignment* used_load_assignment; + if (cla_leds_configs.empty()) { + cluster_load_assignment_ = absl::nullopt; + used_load_assignment = &cluster_load_assignment; + } else { + cluster_load_assignment_ = std::move(cluster_load_assignment); + used_load_assignment = &cluster_load_assignment_.value(); + } + + // Add all the LEDS localities that are new. + for (const auto& leds_config : cla_leds_configs) { + if (leds_localities_.find(leds_config) == leds_localities_.end()) { + ENVOY_LOG(trace, "Found new LEDS config in EDS onConfigUpdate() for cluster {}: {}", + cluster_name_, leds_config.DebugString()); + + // Create a new LEDS subscription and add it to the subscriptions map. + LedsSubscriptionPtr leds_locality_subscription = std::make_unique( + leds_config, cluster_name_, factory_context_, info_->statsScope(), + [&, used_load_assignment]() { + // Called upon an update to the locality. + if (validateAllLedsUpdated()) { + BatchUpdateHelper helper(*this, *used_load_assignment); + priority_set_.batchHostUpdate(helper); + } + }); + leds_localities_.emplace(leds_config, std::move(leds_locality_subscription)); + } + } + + // If all the LEDS localities are updated, the EDS update can occur. If not, then when the last + // LEDS locality will be updated, it will trigger the EDS update helper. 
+ if (!validateAllLedsUpdated()) { + return; + } + + BatchUpdateHelper helper(*this, *used_load_assignment); priority_set_.batchHostUpdate(helper); } @@ -291,6 +389,16 @@ EdsClusterFactory::createClusterImpl( nullptr); } +bool EdsClusterImpl::validateAllLedsUpdated() const { + // Iterate through all LEDS based localities, and if they are all updated return true. + for (const auto& [_, leds_subscription] : leds_localities_) { + if (!leds_subscription->isUpdated()) { + return false; + } + } + return true; +} + /** * Static registration for the Eds cluster factory. @see RegisterFactory. */ diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index f396b5e1785f7..476e783a32a58 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -17,6 +17,7 @@ #include "source/common/config/subscription_base.h" #include "source/common/upstream/cluster_factory_impl.h" +#include "source/common/upstream/leds.h" #include "source/common/upstream/upstream_impl.h" namespace Envoy { @@ -60,6 +61,9 @@ class EdsClusterImpl void startPreInit() override; void onAssignmentTimeout(); + // Returns true iff all the LEDS based localities were updated. 
+ bool validateAllLedsUpdated() const; + class BatchUpdateHelper : public PrioritySet::BatchUpdateCb { public: BatchUpdateHelper( @@ -71,16 +75,34 @@ class EdsClusterImpl void batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) override; private: + void updateLocalityEndpoints( + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, + const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, + PriorityStateManager& priority_state_manager, + absl::flat_hash_set& all_new_hosts); + EdsClusterImpl& parent_; const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment_; }; Config::SubscriptionPtr subscription_; + Server::Configuration::TransportSocketFactoryContextImpl factory_context_; const LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; Event::TimerPtr assignment_timeout_; InitializePhase initialize_phase_; + using LedsConfigSet = absl::flat_hash_set; + using LedsConfigMap = absl::flat_hash_map; + // Maps between a LEDS configuration (ConfigSource + collection name) to the locality endpoints + // data. + LedsConfigMap leds_localities_; + // TODO(adisuissa): Avoid saving the entire cluster load assignment, only the + // relevant parts of the config for each locality. Note that this field must + // be set when LEDS is used. 
+ absl::optional cluster_load_assignment_; }; using EdsClusterImplSharedPtr = std::shared_ptr; diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index e93505b4d619c..da5963e1945d1 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -219,7 +219,7 @@ void HealthCheckerImplBase::setUnhealthyCrossThread(const HostSharedPtr& host, return; } - session->second->setUnhealthy(envoy::data::core::v3::PASSIVE); + session->second->setUnhealthy(envoy::data::core::v3::PASSIVE, /*retriable=*/false); }); } @@ -338,13 +338,14 @@ bool networkHealthCheckFailureType(envoy::data::core::v3::HealthCheckFailureType } // namespace HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy( - envoy::data::core::v3::HealthCheckFailureType type) { + envoy::data::core::v3::HealthCheckFailureType type, bool retriable) { // If we are unhealthy, reset the # of healthy to zero. 
num_healthy_ = 0; HealthTransition changed_state = HealthTransition::Unchanged; if (!host_->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) { - if (!networkHealthCheckFailureType(type) || ++num_unhealthy_ == parent_.unhealthy_threshold_) { + if ((!networkHealthCheckFailureType(type) && !retriable) || + ++num_unhealthy_ == parent_.unhealthy_threshold_) { host_->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); parent_.decHealthy(); changed_state = HealthTransition::Changed; @@ -385,8 +386,8 @@ HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy( } void HealthCheckerImplBase::ActiveHealthCheckSession::handleFailure( - envoy::data::core::v3::HealthCheckFailureType type) { - HealthTransition changed_state = setUnhealthy(type); + envoy::data::core::v3::HealthCheckFailureType type, bool retriable) { + HealthTransition changed_state = setUnhealthy(type, retriable); // It's possible that the previous call caused this session to be deferred deleted. if (timeout_timer_ != nullptr) { timeout_timer_->disableTimer(); @@ -401,7 +402,7 @@ HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::clearPendingFlag(HealthTransition changed_state) { if (host_->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) { host_->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC); - // Even though the health value of the host might have not changed, we set this to Changed to + // Even though the health value of the host might have not changed, we set this to Changed so // that the cluster can update its list of excluded hosts. 
return HealthTransition::Changed; } diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index 5081aac7351d2..780e8af0777d3 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -76,7 +76,8 @@ class HealthCheckerImplBase : public HealthChecker, class ActiveHealthCheckSession : public Event::DeferredDeletable { public: ~ActiveHealthCheckSession() override; - HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type); + HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type, + bool retriable); void onDeferredDeleteBase(); void start() { onInitialInterval(); } @@ -85,7 +86,7 @@ class HealthCheckerImplBase : public HealthChecker, void handleSuccess(bool degraded = false); void handleDegraded(); - void handleFailure(envoy::data::core::v3::HealthCheckFailureType type); + void handleFailure(envoy::data::core::v3::HealthCheckFailureType type, bool retriable = false); HostSharedPtr host_; diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 2e7a1689b5191..6bb52b6b90ca0 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -69,7 +69,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec } Upstream::Cluster& cluster() override { return cluster_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } HealthCheckEventLoggerPtr eventLogger() override { return std::move(event_logger_); } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { return validation_visitor_; @@ -138,6 +138,7 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, 
Router::HeaderParser::configure(config.http_health_check().request_headers_to_add(), config.http_health_check().request_headers_to_remove())), http_status_checker_(config.http_health_check().expected_statuses(), + config.http_health_check().retriable_statuses(), static_cast(Http::Code::OK)), codec_client_type_(codecClientType(config.http_health_check().codec_client_type())), random_generator_(random) { @@ -148,37 +149,63 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, HttpHealthCheckerImpl::HttpStatusChecker::HttpStatusChecker( const Protobuf::RepeatedPtrField& expected_statuses, + const Protobuf::RepeatedPtrField& retriable_statuses, uint64_t default_expected_status) { for (const auto& status_range : expected_statuses) { - const auto start = status_range.start(); - const auto end = status_range.end(); + const auto start = static_cast(status_range.start()); + const auto end = static_cast(status_range.end()); - if (start >= end) { - throw EnvoyException(fmt::format( - "Invalid http status range: expecting start < end, but found start={} and end={}", start, - end)); - } + validateRange(start, end, "expected"); - if (start < 100) { - throw EnvoyException(fmt::format( - "Invalid http status range: expecting start >= 100, but found start={}", start)); - } + expected_ranges_.emplace_back(std::make_pair(start, end)); + } - if (end > 600) { - throw EnvoyException( - fmt::format("Invalid http status range: expecting end <= 600, but found end={}", end)); - } + if (expected_ranges_.empty()) { + expected_ranges_.emplace_back( + std::make_pair(default_expected_status, default_expected_status + 1)); + } + + for (const auto& status_range : retriable_statuses) { + const auto start = static_cast(status_range.start()); + const auto end = static_cast(status_range.end()); + + validateRange(start, end, "retriable"); + + retriable_ranges_.emplace_back(std::make_pair(start, end)); + } +} + +void HttpHealthCheckerImpl::HttpStatusChecker::validateRange(uint64_t 
start, uint64_t end, + absl::string_view range_type) { + if (start >= end) { + throw EnvoyException(fmt::format("Invalid http {} status range: expecting start < " + "end, but found start={} and end={}", + range_type, start, end)); + } - ranges_.emplace_back(std::make_pair(static_cast(start), static_cast(end))); + if (start < 100) { + throw EnvoyException( + fmt::format("Invalid http {} status range: expecting start >= 100, but found start={}", + range_type, start)); } - if (ranges_.empty()) { - ranges_.emplace_back(std::make_pair(default_expected_status, default_expected_status + 1)); + if (end > 600) { + throw EnvoyException(fmt::format( + "Invalid http {} status range: expecting end <= 600, but found end={}", range_type, end)); } } -bool HttpHealthCheckerImpl::HttpStatusChecker::inRange(uint64_t http_status) const { - for (const auto& range : ranges_) { +bool HttpHealthCheckerImpl::HttpStatusChecker::inRetriableRanges(uint64_t http_status) const { + return inRanges(http_status, retriable_ranges_); +} + +bool HttpHealthCheckerImpl::HttpStatusChecker::inExpectedRanges(uint64_t http_status) const { + return inRanges(http_status, expected_ranges_); +} + +bool HttpHealthCheckerImpl::HttpStatusChecker::inRanges( + uint64_t http_status, const std::vector>& ranges) { + for (const auto& range : ranges) { if (http_status >= range.first && http_status < range.second) { return true; } @@ -331,7 +358,7 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { ENVOY_CONN_LOG(debug, "hc response={} health_flags={}", *client_, response_code, HostUtility::healthFlagsToString(*host_)); - if (!parent_.http_status_checker_.inRange(response_code)) { + if (!parent_.http_status_checker_.inExpectedRanges(response_code)) { // If the HTTP response code would indicate failure AND the immediate health check // failure header is set, exclude the host from LB. 
// TODO(mattklein123): We could consider doing this check for any HTTP response code, but this @@ -341,7 +368,12 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { if (response_headers_->EnvoyImmediateHealthCheckFail() != nullptr) { host_->healthFlagSet(Host::HealthFlag::EXCLUDED_VIA_IMMEDIATE_HC_FAIL); } - return HealthCheckResult::Failed; + + if (parent_.http_status_checker_.inRetriableRanges(response_code)) { + return HealthCheckResult::Retriable; + } else { + return HealthCheckResult::Failed; + } } const auto degraded = response_headers_->EnvoyDegraded() != nullptr; @@ -374,7 +406,10 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResponseComplete() { handleSuccess(true); break; case HealthCheckResult::Failed: - handleFailure(envoy::data::core::v3::ACTIVE); + handleFailure(envoy::data::core::v3::ACTIVE, /*retriable=*/false); + break; + case HealthCheckResult::Retriable: + handleFailure(envoy::data::core::v3::ACTIVE, /*retriable=*/true); break; } diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 35a564f6118b9..cb8d62a3d4a61 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -62,12 +62,19 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { public: HttpStatusChecker( const Protobuf::RepeatedPtrField& expected_statuses, + const Protobuf::RepeatedPtrField& retriable_statuses, uint64_t default_expected_status); - bool inRange(uint64_t http_status) const; + bool inRetriableRanges(uint64_t http_status) const; + bool inExpectedRanges(uint64_t http_status) const; private: - std::vector> ranges_; + static bool inRanges(uint64_t http_status, + const std::vector>& ranges); + static void validateRange(uint64_t start, uint64_t end, absl::string_view range_type); + + std::vector> expected_ranges_; + std::vector> retriable_ranges_; }; private: @@ -78,7 +85,7 @@ class HttpHealthCheckerImpl : public 
HealthCheckerImplBase { ~HttpActiveHealthCheckSession() override; void onResponseComplete(); - enum class HealthCheckResult { Succeeded, Degraded, Failed }; + enum class HealthCheckResult { Succeeded, Degraded, Failed, Retriable }; HealthCheckResult healthCheckResult(); bool shouldClose() const; diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 52619e57f0391..5cd361b1da589 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -11,6 +11,7 @@ #include "envoy/upstream/upstream.h" #include "source/common/common/assert.h" +#include "source/common/common/logger.h" #include "source/common/protobuf/utility.h" #include "absl/container/fixed_array.h" @@ -754,10 +755,21 @@ const HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts EdfLoadBalancerBase::EdfLoadBalancerBase( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + const absl::optional slow_start_config, + TimeSource& time_source) : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config), - seed_(random_.random()) { + seed_(random_.random()), + slow_start_window_(slow_start_config.has_value() + ? std::chrono::milliseconds(DurationUtil::durationToMilliseconds( + slow_start_config.value().slow_start_window())) + : std::chrono::milliseconds(0)), + aggression_runtime_( + slow_start_config.has_value() && slow_start_config.value().has_aggression() + ? 
absl::optional({slow_start_config.value().aggression(), runtime}) + : absl::nullopt), + time_source_(time_source), latest_host_added_time_(time_source_.monotonicTime()) { // We fully recompute the schedulers for a given host set here on membership change, which is // consistent with what other LB implementations do (e.g. thread aware). // The downside of a full recompute is that time complexity is O(n * log n), @@ -765,6 +777,12 @@ EdfLoadBalancerBase::EdfLoadBalancerBase( // https://github.com/envoyproxy/envoy/issues/2874). priority_update_cb_ = priority_set.addPriorityUpdateCb( [this](uint32_t priority, const HostVector&, const HostVector&) { refresh(priority); }); + member_update_cb_ = priority_set.addMemberUpdateCb( + [this](const HostVector& hosts_added, const HostVector&) -> void { + if (isSlowStartEnabled()) { + recalculateHostsInSlowStart(hosts_added); + } + }); } void EdfLoadBalancerBase::initialize() { @@ -773,20 +791,38 @@ void EdfLoadBalancerBase::initialize() { } } +void EdfLoadBalancerBase::recalculateHostsInSlowStart(const HostVector& hosts) { + auto current_time = time_source_.monotonicTime(); + // TODO(nezdolik): linear scan can be improved with using flat hash set for hosts in slow start. + for (const auto& host : hosts) { + auto host_create_duration = + std::chrono::duration_cast(current_time - host->creationTime()); + // Check if host existence time is within slow start window. + if (host->creationTime() > latest_host_added_time_ && + host_create_duration <= slow_start_window_ && + host->health() == Upstream::Host::Health::Healthy) { + latest_host_added_time_ = host->creationTime(); + } + } +} + void EdfLoadBalancerBase::refresh(uint32_t priority) { const auto add_hosts_source = [this](HostsSource source, const HostVector& hosts) { // Nuke existing scheduler if it exists. 
auto& scheduler = scheduler_[source] = Scheduler{}; refreshHostSource(source); + if (isSlowStartEnabled()) { + recalculateHostsInSlowStart(hosts); + } - // Check if the original host weights are equal and skip EDF creation if they are. When all - // original weights are equal we can rely on unweighted host pick to do optimal round robin and - // least-loaded host selection with lower memory and CPU overhead. - if (hostWeightsAreEqual(hosts)) { + // Check if the original host weights are equal and no hosts are in slow start mode, in that + // case EDF creation is skipped. When all original weights are equal and no hosts are in slow + // start mode we can rely on unweighted host pick to do optimal round robin and least-loaded + // host selection with lower memory and CPU overhead. + if (hostWeightsAreEqual(hosts) && noHostsAreInSlowStart()) { // Skip edf creation. return; } - scheduler.edf_ = std::make_unique>(); // Populate scheduler with host list. @@ -812,7 +848,6 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) { } } }; - // Populate EdfSchedulers for each valid HostsSource value for the host set at this priority. 
const auto& host_set = priority_set_.hostSetsPerPriority()[priority]; add_hosts_source(HostsSource(priority, HostsSource::SourceType::AllHosts), host_set->hosts()); @@ -834,6 +869,22 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) { } } +bool EdfLoadBalancerBase::isSlowStartEnabled() { + return slow_start_window_ > std::chrono::milliseconds(0); +} + +bool EdfLoadBalancerBase::noHostsAreInSlowStart() { + if (!isSlowStartEnabled()) { + return true; + } + auto current_time = time_source_.monotonicTime(); + if (std::chrono::duration_cast( + current_time - latest_host_added_time_) <= slow_start_window_) { + return false; + } + return true; +} + HostConstSharedPtr EdfLoadBalancerBase::peekAnotherHost(LoadBalancerContext* context) { if (tooManyPreconnects(stashed_random_.size(), total_healthy_hosts_)) { return nullptr; @@ -892,6 +943,36 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont } } +double EdfLoadBalancerBase::applyAggressionFactor(double time_factor) { + if (aggression_ == 1.0 || time_factor == 1.0) { + return time_factor; + } else { + return std::pow(time_factor, 1.0 / aggression_); + } +} + +double EdfLoadBalancerBase::applySlowStartFactor(double host_weight, const Host& host) { + auto host_create_duration = std::chrono::duration_cast( + time_source_.monotonicTime() - host.creationTime()); + if (host_create_duration < slow_start_window_ && + host.health() == Upstream::Host::Health::Healthy) { + aggression_ = aggression_runtime_ != absl::nullopt ? 
aggression_runtime_.value().value() : 1.0; + if (aggression_ < 0.0) { + ENVOY_LOG_EVERY_POW_2(error, "Invalid runtime value provided for aggression parameter, " + "agression cannot be less than 0.0"); + } + aggression_ = std::max(0.0, aggression_); + + ASSERT(aggression_ > 0.0); + auto time_factor = static_cast(std::max(std::chrono::milliseconds(1).count(), + host_create_duration.count())) / + slow_start_window_.count(); + return host_weight * applyAggressionFactor(time_factor); + } else { + return host_weight; + } +} + HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector&, const HostsSource&) { // LeastRequestLoadBalancer can not do deterministic preconnecting, because @@ -903,11 +984,13 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector& hosts_to_use, const HostsSource&) { HostSharedPtr candidate_host = nullptr; + for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { const int rand_idx = random_.random() % hosts_to_use.size(); HostSharedPtr sampled_host = hosts_to_use[rand_idx]; if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. candidate_host = sampled_host; continue; diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index f38e3f5765167..6cd0c3710920e 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -387,12 +387,15 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { * This base class also supports unweighted selection which derived classes can use to customize * behavior. Derived classes can also override how host weight is determined when in weighted mode. 
*/ -class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { +class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase, + Logger::Loggable { public: - EdfLoadBalancerBase(const PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, - Random::RandomGenerator& random, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); + EdfLoadBalancerBase( + const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + const absl::optional slow_start_cofig, + TimeSource& time_source); // Upstream::ZoneAwareLoadBalancerBase HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) override; @@ -410,6 +413,11 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { virtual void refresh(uint32_t priority); + bool isSlowStartEnabled(); + bool noHostsAreInSlowStart(); + + virtual void recalculateHostsInSlowStart(const HostVector& hosts_added); + // Seed to allow us to desynchronize load balancers across a fleet. If we don't // do this, multiple Envoys that receive an update at the same time (or even // multiple load balancers on the same host) will send requests to @@ -417,7 +425,11 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { // overload. const uint64_t seed_; + double applyAggressionFactor(double time_factor); + double applySlowStartFactor(double host_weight, const Host& host); + private: + friend class EdfLoadBalancerBasePeer; virtual void refreshHostSource(const HostsSource& source) PURE; virtual double hostWeight(const Host& host) PURE; virtual HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, @@ -428,6 +440,15 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { // Scheduler for each valid HostsSource. 
absl::node_hash_map scheduler_; Common::CallbackHandlePtr priority_update_cb_; + Common::CallbackHandlePtr member_update_cb_; + +protected: + // Slow start related config + const std::chrono::milliseconds slow_start_window_; + double aggression_{1.0}; + const absl::optional aggression_runtime_; + TimeSource& time_source_; + MonotonicTime latest_host_added_time_; }; /** @@ -436,12 +457,20 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { */ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { public: - RoundRobinLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, - Random::RandomGenerator& random, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) - : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, - common_config) { + RoundRobinLoadBalancer( + const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + const absl::optional + round_robin_config, + TimeSource& time_source) + : EdfLoadBalancerBase( + priority_set, local_priority_set, stats, runtime, random, common_config, + (round_robin_config.has_value() && round_robin_config.value().has_slow_start_config()) + ? absl::optional( + round_robin_config.value().slow_start_config()) + : absl::nullopt, + time_source) { initialize(); } @@ -455,7 +484,13 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { // index. 
peekahead_index_ = 0; } - double hostWeight(const Host& host) override { return host.weight(); } + double hostWeight(const Host& host) override { + if (!noHostsAreInSlowStart()) { + return applySlowStartFactor(host.weight(), host); + } + return host.weight(); + } + HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, const HostsSource& source) override { auto i = rr_indexes_.find(source); @@ -498,37 +533,45 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { * The benefit of the Maglev table is at the expense of resolution, memory usage is capped. * Additionally, the Maglev table can be shared amongst all threads. */ -class LeastRequestLoadBalancer : public EdfLoadBalancerBase, - Logger::Loggable { +class LeastRequestLoadBalancer : public EdfLoadBalancerBase { public: LeastRequestLoadBalancer( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, const absl::optional - least_request_config) - : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, - common_config), + least_request_config, + TimeSource& time_source) + : EdfLoadBalancerBase( + priority_set, local_priority_set, stats, runtime, random, common_config, + (least_request_config.has_value() && + least_request_config.value().has_slow_start_config()) + ? absl::optional( + least_request_config.value().slow_start_config()) + : absl::nullopt, + time_source), choice_count_( least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.value(), choice_count, 2) : 2), active_request_bias_runtime_( least_request_config.has_value() && least_request_config->has_active_request_bias() - ? std::make_unique(least_request_config->active_request_bias(), - runtime) - : nullptr) { + ? 
absl::optional( + {least_request_config->active_request_bias(), runtime}) + : absl::nullopt) { initialize(); } protected: void refresh(uint32_t priority) override { - active_request_bias_ = - active_request_bias_runtime_ != nullptr ? active_request_bias_runtime_->value() : 1.0; + active_request_bias_ = active_request_bias_runtime_ != absl::nullopt + ? active_request_bias_runtime_.value().value() + : 1.0; if (active_request_bias_ < 0.0) { - ENVOY_LOG(warn, "upstream: invalid active request bias supplied (runtime key {}), using 1.0", - active_request_bias_runtime_->runtimeKey()); + ENVOY_LOG_MISC(warn, + "upstream: invalid active request bias supplied (runtime key {}), using 1.0", + active_request_bias_runtime_->runtimeKey()); active_request_bias_ = 1.0; } @@ -555,16 +598,21 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase, // // It might be possible to do better by picking two hosts off of the schedule, and selecting the // one with fewer active requests at the time of selection. - if (active_request_bias_ == 0.0) { - return host.weight(); - } + + double host_weight = static_cast(host.weight()); if (active_request_bias_ == 1.0) { - return static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + host_weight = static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + } else if (active_request_bias_ != 0.0) { + host_weight = static_cast(host.weight()) / + std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); } - return static_cast(host.weight()) / - std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); + if (!noHostsAreInSlowStart()) { + return applySlowStartFactor(host_weight, host); + } else { + return host_weight; + } } HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, const HostsSource& source) override; @@ -578,13 +626,14 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase, // whenever a `HostSet` is updated. 
double active_request_bias_{}; - const std::unique_ptr active_request_bias_runtime_; + const absl::optional active_request_bias_runtime_; }; /** * Random load balancer that picks a random host out of all hosts. */ -class RandomLoadBalancer : public ZoneAwareLoadBalancerBase { +class RandomLoadBalancer : public ZoneAwareLoadBalancerBase, + Logger::Loggable { public: RandomLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index ca9c9a809664b..c51bc3c94386d 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -49,13 +49,13 @@ LogicalDnsCluster::LogicalDnsCluster( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), + factory_context.mainThreadDispatcher().timeSource()), dns_resolver_(dns_resolver), dns_refresh_rate_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))), respect_dns_ttl_(cluster.respect_dns_ttl()), resolve_timer_( - factory_context.dispatcher().createTimer([this]() -> void { startResolve(); })), + factory_context.mainThreadDispatcher().createTimer([this]() -> void { startResolve(); })), local_info_(factory_context.localInfo()), load_assignment_(convertPriority(cluster.load_assignment())) { failure_backoff_strategy_ = diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 9cf6887aff530..00561a544a165 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -110,8 +110,8 @@ OriginalDstCluster::OriginalDstCluster( 
Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : ClusterImplBase(config, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), - dispatcher_(factory_context.dispatcher()), + factory_context.mainThreadDispatcher().timeSource()), + dispatcher_(factory_context.mainThreadDispatcher()), cleanup_interval_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, cleanup_interval, 5000))), cleanup_timer_(dispatcher_.createTimer([this]() -> void { cleanup(); })), diff --git a/source/common/upstream/retry_factory.h b/source/common/upstream/retry_factory.h new file mode 100644 index 0000000000000..7c335116cb663 --- /dev/null +++ b/source/common/upstream/retry_factory.h @@ -0,0 +1,21 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +namespace Envoy { +namespace Upstream { + +class RetryExtensionFactoryContextImpl : public Upstream::RetryExtensionFactoryContext { +public: + RetryExtensionFactoryContextImpl(Singleton::Manager& singleton_manager) + : singleton_manager_(singleton_manager) {} + + // Upstream::RetryOptionsPredicateFactoryContext + Singleton::Manager& singletonManager() override { return singleton_manager_; } + +private: + Singleton::Manager& singleton_manager_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/static_cluster.cc b/source/common/upstream/static_cluster.cc index a8741f7a8f592..b1656fe2ee698 100644 --- a/source/common/upstream/static_cluster.cc +++ b/source/common/upstream/static_cluster.cc @@ -12,7 +12,7 @@ StaticClusterImpl::StaticClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), + factory_context.mainThreadDispatcher().timeSource()), 
priority_state_manager_( new PriorityStateManager(*this, factory_context.localInfo(), nullptr)) { const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment = @@ -20,11 +20,17 @@ StaticClusterImpl::StaticClusterImpl( overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); - Event::Dispatcher& dispatcher = factory_context.dispatcher(); + Event::Dispatcher& dispatcher = factory_context.mainThreadDispatcher(); for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { validateEndpointsForZoneAwareRouting(locality_lb_endpoint); priority_state_manager_->initializePriorityFor(locality_lb_endpoint); + // TODO(adisuissa): Implement LEDS support for STATIC clusters. + if (locality_lb_endpoint.has_leds_cluster_locality_config()) { + throw EnvoyException( + fmt::format("LEDS is only supported when EDS is used. Static cluster {} cannot use LEDS.", + cluster.name())); + } for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { priority_state_manager_->registerHostForPriority( lb_endpoint.endpoint().hostname(), resolveProtoAddress(lb_endpoint.endpoint().address()), diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index 0d03ea0a00f63..44cb3e83485a1 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -14,7 +14,7 @@ StrictDnsClusterImpl::StrictDnsClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, factory_context.mainThreadDispatcher().timeSource()), load_assignment_(cluster.load_assignment()), local_info_(factory_context.localInfo()), dns_resolver_(dns_resolver), 
dns_refresh_rate_ms_( @@ -37,8 +37,8 @@ StrictDnsClusterImpl::StrictDnsClusterImpl( const std::string& url = fmt::format("tcp://{}:{}", socket_address.address(), socket_address.port_value()); - resolve_targets.emplace_back(new ResolveTarget(*this, factory_context.dispatcher(), url, - locality_lb_endpoint, lb_endpoint)); + resolve_targets.emplace_back(new ResolveTarget(*this, factory_context.mainThreadDispatcher(), + url, locality_lb_endpoint, lb_endpoint)); } } resolve_targets_ = std::move(resolve_targets); diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 4c5a420a94501..b5bf551cc667b 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -26,19 +26,23 @@ SubsetLoadBalancer::SubsetLoadBalancer( const absl::optional& lb_ring_hash_config, const absl::optional& lb_maglev_config, + const absl::optional& + round_robin_config, const absl::optional& least_request_config, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + TimeSource& time_source) : lb_type_(lb_type), lb_ring_hash_config_(lb_ring_hash_config), - lb_maglev_config_(lb_maglev_config), least_request_config_(least_request_config), - common_config_(common_config), stats_(stats), scope_(scope), runtime_(runtime), - random_(random), fallback_policy_(subsets.fallbackPolicy()), + lb_maglev_config_(lb_maglev_config), round_robin_config_(round_robin_config), + least_request_config_(least_request_config), common_config_(common_config), stats_(stats), + scope_(scope), runtime_(runtime), random_(random), fallback_policy_(subsets.fallbackPolicy()), default_subset_metadata_(subsets.defaultSubset().fields().begin(), subsets.defaultSubset().fields().end()), subset_selectors_(subsets.subsetSelectors()), original_priority_set_(priority_set), original_local_priority_set_(local_priority_set), locality_weight_aware_(subsets.localityWeightAware()), - 
scale_locality_weight_(subsets.scaleLocalityWeight()), list_as_any_(subsets.listAsAny()) { + scale_locality_weight_(subsets.scaleLocalityWeight()), list_as_any_(subsets.listAsAny()), + time_source_(time_source) { ASSERT(subsets.isEnabled()); if (fallback_policy_ != envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK) { @@ -751,7 +755,8 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan case LoadBalancerType::LeastRequest: lb_ = std::make_unique( *this, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_, - subset_lb.random_, subset_lb.common_config_, subset_lb.least_request_config_); + subset_lb.random_, subset_lb.common_config_, subset_lb.least_request_config_, + subset_lb.time_source_); break; case LoadBalancerType::Random: @@ -761,9 +766,10 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan break; case LoadBalancerType::RoundRobin: - lb_ = std::make_unique(*this, subset_lb.original_local_priority_set_, - subset_lb.stats_, subset_lb.runtime_, - subset_lb.random_, subset_lb.common_config_); + lb_ = std::make_unique( + *this, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_, + subset_lb.random_, subset_lb.common_config_, subset_lb.round_robin_config_, + subset_lb.time_source_); break; case LoadBalancerType::RingHash: diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 354341ff060cf..1be1830aa5877 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -30,9 +30,12 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable& lb_ring_hash_config, const absl::optional& lb_maglev_config, + const absl::optional& + round_robin_config, const absl::optional& least_request_config, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + TimeSource& time_source); 
~SubsetLoadBalancer() override; // Upstream::LoadBalancer @@ -239,6 +242,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable lb_ring_hash_config_; const absl::optional lb_maglev_config_; + const absl::optional round_robin_config_; const absl::optional least_request_config_; const envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; @@ -280,6 +284,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable void { - refresh(); - threadSafeSetCrossPriorityHostMap(priority_set_.crossPriorityHostMap()); - }); + [this](uint32_t, const HostVector&, const HostVector&) -> void { refresh(); }); refresh(); } @@ -134,6 +131,7 @@ void ThreadAwareLoadBalancerBase::refresh() { factory_->healthy_per_priority_load_ = healthy_per_priority_load; factory_->degraded_per_priority_load_ = degraded_per_priority_load; factory_->per_priority_state_ = per_priority_state_vector; + factory_->cross_priority_host_map_ = priority_set_.crossPriorityHostMap(); } } @@ -181,8 +179,7 @@ ThreadAwareLoadBalancerBase::LoadBalancerImpl::chooseHost(LoadBalancerContext* c } LoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() { - auto lb = std::make_unique( - stats_, random_, thread_aware_lb_.threadSafeGetCrossPriorityHostMap()); + auto lb = std::make_unique(stats_, random_); // We must protect current_lb_ via a RW lock since it is accessed and written to by multiple // threads. All complex processing has already been precalculated however. 
@@ -190,6 +187,7 @@ LoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() { lb->healthy_per_priority_load_ = healthy_per_priority_load_; lb->degraded_per_priority_load_ = degraded_per_priority_load_; lb->per_priority_state_ = per_priority_state_; + lb->cross_priority_host_map_ = cross_priority_host_map_; return lb; } diff --git a/source/common/upstream/thread_aware_lb_impl.h b/source/common/upstream/thread_aware_lb_impl.h index 81a1e5e2e4c83..fa26abddf98b4 100644 --- a/source/common/upstream/thread_aware_lb_impl.h +++ b/source/common/upstream/thread_aware_lb_impl.h @@ -98,7 +98,7 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : LoadBalancerBase(priority_set, stats, runtime, random, common_config), - factory_(new LoadBalancerFactoryImpl(stats, random, *this)) {} + factory_(new LoadBalancerFactoryImpl(stats, random)) {} private: struct PerPriorityState { @@ -108,9 +108,8 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL using PerPriorityStatePtr = std::unique_ptr; struct LoadBalancerImpl : public LoadBalancer { - LoadBalancerImpl(ClusterStats& stats, Random::RandomGenerator& random, - HostMapConstSharedPtr host_map) - : stats_(stats), random_(random), cross_priority_host_map_(std::move(host_map)) {} + LoadBalancerImpl(ClusterStats& stats, Random::RandomGenerator& random) + : stats_(stats), random_(random) {} // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; @@ -128,15 +127,12 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL }; struct LoadBalancerFactoryImpl : public LoadBalancerFactory { - LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random, - ThreadAwareLoadBalancerBase& thread_aware_lb) - : thread_aware_lb_(thread_aware_lb), stats_(stats), random_(random) {} + 
LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random) + : stats_(stats), random_(random) {} // Upstream::LoadBalancerFactory LoadBalancerPtr create() override; - ThreadAwareLoadBalancerBase& thread_aware_lb_; - ClusterStats& stats_; Random::RandomGenerator& random_; absl::Mutex mutex_; @@ -144,6 +140,16 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL // This is split out of PerPriorityState so LoadBalancerBase::ChoosePriority can be reused. std::shared_ptr healthy_per_priority_load_ ABSL_GUARDED_BY(mutex_); std::shared_ptr degraded_per_priority_load_ ABSL_GUARDED_BY(mutex_); + + // Whenever the membership changes, the cross_priority_host_map_ will be updated automatically. + // And all workers will create a new worker local load balancer and copy the + // cross_priority_host_map_. + // This leads to the possibility of simultaneous reading and writing of cross_priority_host_map_ + // in different threads. For this reason, mutex is necessary to guard cross_priority_host_map_. + // + // Cross priority host map for fast cross priority host searching. When the priority update + // callback is executed, the host map will also be updated. 
+ HostMapConstSharedPtr cross_priority_host_map_ ABSL_GUARDED_BY(mutex_); }; virtual HashingLoadBalancerSharedPtr @@ -151,29 +157,8 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL double min_normalized_weight, double max_normalized_weight) PURE; void refresh(); - void threadSafeSetCrossPriorityHostMap(HostMapConstSharedPtr host_map) { - absl::MutexLock ml(&cross_priority_host_map_mutex_); - cross_priority_host_map_ = std::move(host_map); - } - HostMapConstSharedPtr threadSafeGetCrossPriorityHostMap() { - absl::MutexLock ml(&cross_priority_host_map_mutex_); - return cross_priority_host_map_; - } - std::shared_ptr factory_; Common::CallbackHandlePtr priority_update_cb_; - - // Whenever the membership changes, the cross_priority_host_map_ will be updated automatically. - // And all workers will create a new worker local load balancer and copy the - // cross_priority_host_map_. - // - // This leads to the possibility of simultaneous reading and writing of cross_priority_host_map_ - // in different threads. For this reason, an additional mutex is necessary to guard - // cross_priority_host_map_. - absl::Mutex cross_priority_host_map_mutex_; - // Cross priority host map for fast cross priority host searching. When the priority update - // callback is executed, the host map will also be updated. - HostMapConstSharedPtr cross_priority_host_map_ ABSL_GUARDED_BY(cross_priority_host_map_mutex_); }; } // namespace Upstream diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 2eec93be78914..ab0e0c76a4fe3 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -676,17 +676,19 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { // other contexts taken from TransportSocketFactoryContext. 
FactoryContextImpl(Stats::Scope& stats_scope, Envoy::Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& c) - : admin_(c.admin()), stats_scope_(stats_scope), cluster_manager_(c.clusterManager()), - local_info_(c.localInfo()), dispatcher_(c.dispatcher()), runtime_(runtime), + : admin_(c.admin()), server_scope_(c.stats()), stats_scope_(stats_scope), + cluster_manager_(c.clusterManager()), local_info_(c.localInfo()), + dispatcher_(c.mainThreadDispatcher()), runtime_(runtime), singleton_manager_(c.singletonManager()), tls_(c.threadLocal()), api_(c.api()), options_(c.options()), message_validation_visitor_(c.messageValidationVisitor()) {} Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } const Server::Options& options() override { return options_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } Stats::Scope& scope() override { return stats_scope_; } + Stats::Scope& serverScope() override { return server_scope_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } ThreadLocal::SlotAllocator& threadLocal() override { return tls_; } Server::Admin& admin() override { return admin_; } @@ -719,6 +721,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { private: Server::Admin& admin_; + Stats::Scope& server_scope_; Stats::Scope& stats_scope_; Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; @@ -1017,14 +1020,14 @@ ClusterImplBase::ClusterImplBase( local_cluster_(factory_context.clusterManager().localClusterName().value_or("") == cluster.name()), const_metadata_shared_pool_(Config::Metadata::getConstMetadataSharedPool( - factory_context.singletonManager(), factory_context.dispatcher())) { 
+ factory_context.singletonManager(), factory_context.mainThreadDispatcher())) { factory_context.setInitManager(init_manager_); auto socket_factory = createTransportSocketFactory(cluster, factory_context); auto* raw_factory_pointer = socket_factory.get(); auto socket_matcher = std::make_unique( cluster.transport_socket_matches(), factory_context, socket_factory, *stats_scope); - auto& dispatcher = factory_context.dispatcher(); + auto& dispatcher = factory_context.mainThreadDispatcher(); info_ = std::shared_ptr( new ClusterInfoImpl(cluster, factory_context.clusterManager().bindConfig(), runtime, std::move(socket_matcher), std::move(stats_scope), added_via_api, @@ -1767,6 +1770,8 @@ getDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily return Network::DnsLookupFamily::V4Only; case envoy::config::cluster::v3::Cluster::AUTO: return Network::DnsLookupFamily::Auto; + case envoy::config::cluster::v3::Cluster::V4_PREFERRED: + return Network::DnsLookupFamily::V4Preferred; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 91b9c2133a887..1c7a77979fee8 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -639,6 +639,10 @@ class ClusterInfoImpl : public ClusterInfo, clusterType() const override { return cluster_type_; } + const absl::optional& + lbRoundRobinConfig() const override { + return lb_round_robin_config_; + } const absl::optional& lbLeastRequestConfig() const override { return lb_least_request_config_; @@ -779,6 +783,7 @@ class ClusterInfoImpl : public ClusterInfo, const std::string maintenance_mode_runtime_key_; const Network::Address::InstanceConstSharedPtr source_address_; LoadBalancerType lb_type_; + absl::optional lb_round_robin_config_; absl::optional lb_least_request_config_; absl::optional lb_ring_hash_config_; diff --git a/source/common/watchdog/BUILD b/source/common/watchdog/BUILD index 
21fbfd3302ce5..ce5210c0c245d 100644 --- a/source/common/watchdog/BUILD +++ b/source/common/watchdog/BUILD @@ -19,7 +19,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/protobuf:utility_lib", "//source/common/thread:terminate_thread_lib", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) @@ -33,6 +33,6 @@ envoy_cc_library( "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:message_validator_lib", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) diff --git a/source/common/watchdog/abort_action.cc b/source/common/watchdog/abort_action.cc index daa405fdda249..d1461af7bcedb 100644 --- a/source/common/watchdog/abort_action.cc +++ b/source/common/watchdog/abort_action.cc @@ -14,7 +14,7 @@ namespace { constexpr uint64_t DefaultWaitDurationMs = 5000; } // end namespace -AbortAction::AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, +AbortAction::AbortAction(envoy::watchdog::v3::AbortActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& /*context*/) : wait_duration_(absl::Milliseconds( PROTOBUF_GET_MS_OR_DEFAULT(config, wait_duration, DefaultWaitDurationMs))) {} diff --git a/source/common/watchdog/abort_action.h b/source/common/watchdog/abort_action.h index 5170c8bbea000..e6291657db1d6 100644 --- a/source/common/watchdog/abort_action.h +++ b/source/common/watchdog/abort_action.h @@ -2,7 +2,7 @@ #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" namespace Envoy { namespace Watchdog { @@ -12,7 +12,7 @@ namespace Watchdog { */ class AbortAction : public Server::Configuration::GuardDogAction { public: - AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, + AbortAction(envoy::watchdog::v3::AbortActionConfig& config, 
Server::Configuration::GuardDogActionFactoryContext& context); void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, diff --git a/source/common/watchdog/abort_action_config.h b/source/common/watchdog/abort_action_config.h index 54f2169bf15f6..65541a4bfdd41 100644 --- a/source/common/watchdog/abort_action_config.h +++ b/source/common/watchdog/abort_action_config.h @@ -1,7 +1,7 @@ #pragma once #include "envoy/server/guarddog_config.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/protobuf/protobuf.h" @@ -22,7 +22,7 @@ class AbortActionFactory : public Server::Configuration::GuardDogActionFactory { std::string name() const override { return "envoy.watchdog.abort_action"; } - using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; + using AbortActionConfig = envoy::watchdog::v3::AbortActionConfig; }; } // namespace Watchdog diff --git a/source/docs/logging.md b/source/docs/logging.md new file mode 100644 index 0000000000000..965a50f77048a --- /dev/null +++ b/source/docs/logging.md @@ -0,0 +1,89 @@ +### Overview + +Envoy uses the [spdlog library](https://github.com/gabime/spdlog#readme) for logging +through a variety of Envoy specific macros. + +### Concepts + +#### Level + +Log messages are emitted with a log level chosen from one of the following: +* trace +* debug +* info +* warn +* error +* critical + +This log level can be used to restrict which log messages are actually +shown via the `setLevel()` method of `Envoy::Logger::Logger` or via the command +line argument `--l `. Any messages which has a level less than the specified +level will be squelched. + +In addition, the log level is typically show in the emitted log line. 
For example +in the following line, you can see the level is `debug`: + +``` +[2021-09-22 18:39:01.268][28][debug][pool] [source/common/conn_pool/conn_pool_base.cc:293] [C18299946955195659044] attaching to next stream +``` + +#### ID + +In addition the the level, every log is emitted with an ID. This ID is not +a numeric ID (like a stream ID or a connection ID) but is instead a token that +is used to groups log messages in by category. The list of known ID is defined +in `ALL_LOGGER_IDS` from `source/common/common/logger.h`. Similar to level, these +IDs show up in log lines. For example in the following line, you can see the +ID is `pool`: + +``` +[2021-09-22 18:39:01.268][28][debug][pool] [source/common/conn_pool/conn_pool_base.cc:293] [C18299946955195659044] attaching to next stream +``` + +### APIs + +#### ENVOY_LOG + +Most log messages in Envoy are generated via the `ENVOY_LOG()` macro. For example: + +``` +ENVOY_LOG(debug, "subset lb: fallback load balancer disabled"); +``` + +This macro takes the log level as the first argument and the log message as the +second argument. However the ID is not explicitly specified. Instead, the ID +typically comes via the class inheriting from `Logger::Loggable`. By doing this, +`ENVOY_LOG()` calls are able to find the relevant log ID. + +#### ENVOY_LOG_TO_LOGGER + +Under some circumstances, code will not be in a method of a class which extends +`Loggable`. In those cases, there are a couple of options. One is to use +`ENVOY_LOG_TO_LOGGER` and pass in an existing logger. This logger can come via +the caller, or by requesting the logger for a specific ID. For example: + +``` + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::pool), warn, + "Failed to create Http/3 client. Transport socket " + "factory is not configured correctly."); +``` + +#### ENVOY_LOG_MISC + +As a last resort, the `ENVOY_LOG_MISC` macro can be used to log with the `misc` ID. 
For +example: + +``` +ENVOY_LOG_MISC(warn, "failed to enable core dump"); +``` +However, it is usually much better to log to a more specific ID. + +#### ENVOY_CONN_LOG / ENVOY_STREAM_LOG + +There is another API which can be used specifically for `Connection` or `Stream` +related log messages. `ENVOY_CONN_LOG` takes an additional `Connection` argument +and `ENVOY_STREAM_LOG` takes an additional `Stream` argument. These macros work +like `ENVOY_LOG` except that they prepend the log message with `[C123]` or +`[C123][S456` based on the connection/stream ID of the specified argument. +Note that the IDs here are the Envoy IDs *NOT* the on-the-wire IDs from HTTP/2 +or HTTP/3. diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index b13edd3fc792f..2615d76b9eb7a 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -225,6 +225,7 @@ int MainCommon::main(int argc, char** argv, PostServerHook hook) { // handling, such as running in a chroot jail. absl::InitializeSymbolizer(argv[0]); #endif + Thread::MainThread main_thread; std::unique_ptr main_common; // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE diff --git a/source/exe/main_common.h b/source/exe/main_common.h index d61ecdac4ee06..a393c841e8b3a 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -138,6 +138,8 @@ class MainCommon { static int main(int argc, char** argv, PostServerHook hook = nullptr); private: + Thread::MainThread main_thread_; + #ifdef ENVOY_HANDLE_SIGNALS Envoy::SignalAction handle_sigs_; Envoy::TerminateHandler log_on_terminate_; diff --git a/source/extensions/BUILD b/source/extensions/BUILD index 5d4f6c8a9b745..7105032fd6d61 100644 --- a/source/extensions/BUILD +++ b/source/extensions/BUILD @@ -1,6 +1,14 @@ +load("//bazel:utils.bzl", "json_data") +load(":extensions_build_config.bzl", "EXTENSIONS") + licenses(["notice"]) # Apache 2 exports_files([ "extensions_metadata.yaml", "extensions_build_config.bzl", 
]) + +json_data( + name = "extensions_build_config", + data = EXTENSIONS, +) diff --git a/source/extensions/access_loggers/common/grpc_access_logger.h b/source/extensions/access_loggers/common/grpc_access_logger.h index 416d19164662e..921186875fbe5 100644 --- a/source/extensions/access_loggers/common/grpc_access_logger.h +++ b/source/extensions/access_loggers/common/grpc_access_logger.h @@ -68,9 +68,8 @@ template class GrpcAccessLogge * @param config supplies the configuration for the logger. * @return GrpcAccessLoggerSharedPtr ready for logging requests. */ - virtual typename GrpcAccessLogger::SharedPtr getOrCreateLogger(const ConfigProto& config, - GrpcAccessLoggerType logger_type, - Stats::Scope& scope) PURE; + virtual typename GrpcAccessLogger::SharedPtr + getOrCreateLogger(const ConfigProto& config, GrpcAccessLoggerType logger_type) PURE; }; template class GrpcAccessLogClient { @@ -172,7 +171,7 @@ class GrpcAccessLogger : public Detail::GrpcAccessLoggerenableTimer(buffer_flush_interval_msec_); } - void log(HttpLogProto&& entry) { + void log(HttpLogProto&& entry) override { if (!canLogMore()) { return; } @@ -183,7 +182,7 @@ class GrpcAccessLogger : public Detail::GrpcAccessLogger= max_buffer_size_bytes_) { @@ -252,15 +251,14 @@ class GrpcAccessLoggerCache : public Singleton::Instance, GrpcAccessLoggerCache(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope, ThreadLocal::SlotAllocator& tls) - : async_client_manager_(async_client_manager), scope_(scope), tls_slot_(tls.allocateSlot()) { + : scope_(scope), async_client_manager_(async_client_manager), tls_slot_(tls.allocateSlot()) { tls_slot_->set([](Event::Dispatcher& dispatcher) { return std::make_shared(dispatcher); }); } - typename GrpcAccessLogger::SharedPtr getOrCreateLogger(const ConfigProto& config, - GrpcAccessLoggerType logger_type, - Stats::Scope& scope) override { + typename GrpcAccessLogger::SharedPtr + getOrCreateLogger(const ConfigProto& config, GrpcAccessLoggerType logger_type) 
override { // TODO(euroelessar): Consider cleaning up loggers. auto& cache = tls_slot_->getTyped(); const auto cache_key = std::make_pair(MessageUtil::hash(config), logger_type); @@ -268,17 +266,23 @@ class GrpcAccessLoggerCache : public Singleton::Instance, if (it != cache.access_loggers_.end()) { return it->second; } + // We pass skip_cluster_check=true to factoryForGrpcService in order to avoid throwing + // exceptions in worker threads. Call sites of this getOrCreateLogger must check the cluster + // availability via ClusterManager::checkActiveStaticCluster beforehand, and throw exceptions in + // the main thread if necessary. + auto client = async_client_manager_.factoryForGrpcService(config.grpc_service(), scope_, true) + ->createUncachedRawAsyncClient(); const auto logger = createLogger( - config, - async_client_manager_.factoryForGrpcService(config.grpc_service(), scope_, false) - ->createUncachedRawAsyncClient(), + config, std::move(client), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_, - scope); + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_); cache.access_loggers_.emplace(cache_key, logger); return logger; } +protected: + Stats::Scope& scope_; + private: /** * Per-thread cache. 
@@ -297,10 +301,9 @@ class GrpcAccessLoggerCache : public Singleton::Instance, virtual typename GrpcAccessLogger::SharedPtr createLogger(const ConfigProto& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) PURE; + Event::Dispatcher& dispatcher) PURE; Grpc::AsyncClientManager& async_client_manager_; - Stats::Scope& scope_; ThreadLocal::SlotPtr tls_slot_; }; diff --git a/source/extensions/access_loggers/grpc/config_utils.cc b/source/extensions/access_loggers/grpc/config_utils.cc index 0010109617cd7..e74a2892a826f 100644 --- a/source/extensions/access_loggers/grpc/config_utils.cc +++ b/source/extensions/access_loggers/grpc/config_utils.cc @@ -15,7 +15,7 @@ getGrpcAccessLoggerCacheSingleton(Server::Configuration::CommonFactoryContext& c return context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(grpc_access_logger_cache), [&context] { return std::make_shared( - context.clusterManager().grpcAsyncClientManager(), context.scope(), + context.clusterManager().grpcAsyncClientManager(), context.serverScope(), context.threadLocal(), context.localInfo()); }); } diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index e544a52af1911..ca45d2c5acaf5 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -53,10 +53,10 @@ GrpcAccessLoggerImpl::SharedPtr GrpcAccessLoggerCacheImpl::createLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) { + Event::Dispatcher& dispatcher) { return std::make_shared(client, config.log_name(), 
buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, local_info_, scope); + dispatcher, local_info_, scope_); } } // namespace GrpcCommon diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index 43b5423274762..c502f4365d891 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -54,7 +54,7 @@ class GrpcAccessLoggerCacheImpl createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) override; + Event::Dispatcher& dispatcher) override; const LocalInfo::LocalInfo& local_info_; }; diff --git a/source/extensions/access_loggers/grpc/http_config.cc b/source/extensions/access_loggers/grpc/http_config.cc index 4d333f6d91fdc..5d3b795100670 100644 --- a/source/extensions/access_loggers/grpc/http_config.cc +++ b/source/extensions/access_loggers/grpc/http_config.cc @@ -27,9 +27,13 @@ AccessLog::InstanceSharedPtr HttpGrpcAccessLogFactory::createAccessLogInstance( const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig&>( config, context.messageValidationVisitor()); - return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), - context.scope()); + const auto service_config = proto_config.common_config().grpc_service(); + if (service_config.has_envoy_grpc()) { + context.clusterManager().checkActiveStaticCluster(service_config.envoy_grpc().cluster_name()); + } + return std::make_shared( + std::move(filter), proto_config, context.threadLocal(), + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); } ProtobufTypes::MessagePtr HttpGrpcAccessLogFactory::createEmptyConfigProto() { 
diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index f35715c37ad98..e3de3291a40cf 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -23,29 +23,30 @@ HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( GrpcCommon::GrpcAccessLoggerSharedPtr logger) : logger_(std::move(logger)) {} -HttpGrpcAccessLog::HttpGrpcAccessLog( - AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), +HttpGrpcAccessLog::HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, + const HttpGrpcAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) + : Common::ImplBase(std::move(filter)), + config_(std::make_shared(std::move(config))), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { - for (const auto& header : config_.additional_request_headers_to_log()) { + for (const auto& header : config_->additional_request_headers_to_log()) { request_headers_to_log_.emplace_back(header); } - for (const auto& header : config_.additional_response_headers_to_log()) { + for (const auto& header : config_->additional_response_headers_to_log()) { response_headers_to_log_.emplace_back(header); } - for (const auto& header : config_.additional_response_trailers_to_log()) { + for (const auto& header : config_->additional_response_trailers_to_log()) { response_trailers_to_log_.emplace_back(header); } - Envoy::Config::Utility::checkTransportVersion(config_.common_config()); - tls_slot_->set([this](Event::Dispatcher&) { - return 
std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), Common::GrpcAccessLoggerType::HTTP, scope_)); - }); + Envoy::Config::Utility::checkTransportVersion(config_->common_config()); + tls_slot_->set( + [config = config_, access_logger_cache = access_logger_cache_](Event::Dispatcher&) { + return std::make_shared(access_logger_cache->getOrCreateLogger( + config->common_config(), Common::GrpcAccessLoggerType::HTTP)); + }); } void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, @@ -56,7 +57,7 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, // TODO(mattklein123): Populate sample_rate field. envoy::data::accesslog::v3::HTTPAccessLogEntry log_entry; GrpcCommon::Utility::extractCommonAccessLogProperties(*log_entry.mutable_common_properties(), - stream_info, config_.common_config()); + stream_info, config_->common_config()); if (stream_info.protocol()) { switch (stream_info.protocol().value()) { diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index d403596f8decc..6cfaf97d56177 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -21,16 +21,17 @@ namespace HttpGrpc { // TODO(mattklein123): Stats +using envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig; +using HttpGrpcAccessLogConfigConstSharedPtr = std::shared_ptr; + /** * Access log Instance that streams HTTP logs over gRPC. 
*/ class HttpGrpcAccessLog : public Common::ImplBase { public: - HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, + HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, const HttpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); private: /** @@ -48,8 +49,7 @@ class HttpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; - Stats::Scope& scope_; - const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; + const HttpGrpcAccessLogConfigConstSharedPtr config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; std::vector request_headers_to_log_; diff --git a/source/extensions/access_loggers/grpc/tcp_config.cc b/source/extensions/access_loggers/grpc/tcp_config.cc index 185a76e934d8a..495cdfa3738c9 100644 --- a/source/extensions/access_loggers/grpc/tcp_config.cc +++ b/source/extensions/access_loggers/grpc/tcp_config.cc @@ -27,9 +27,12 @@ AccessLog::InstanceSharedPtr TcpGrpcAccessLogFactory::createAccessLogInstance( const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig&>( config, context.messageValidationVisitor()); + const auto service_config = proto_config.common_config().grpc_service(); + if (service_config.has_envoy_grpc()) { + context.clusterManager().checkActiveStaticCluster(service_config.envoy_grpc().cluster_name()); + } return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), - context.scope()); + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); } ProtobufTypes::MessagePtr TcpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git 
a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc index 7fbcee911d5bc..fb1a2a4d0bd2c 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc @@ -17,18 +17,19 @@ namespace TcpGrpc { TcpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcCommon::GrpcAccessLoggerSharedPtr logger) : logger_(std::move(logger)) {} -TcpGrpcAccessLog::TcpGrpcAccessLog( - AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), +TcpGrpcAccessLog::TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, + const TcpGrpcAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) + : Common::ImplBase(std::move(filter)), + config_(std::make_shared(std::move(config))), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { - Config::Utility::checkTransportVersion(config_.common_config()); - tls_slot_->set([this](Event::Dispatcher&) { - return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), Common::GrpcAccessLoggerType::TCP, scope_)); - }); + Config::Utility::checkTransportVersion(config_->common_config()); + tls_slot_->set( + [config = config_, access_logger_cache = access_logger_cache_](Event::Dispatcher&) { + return std::make_shared(access_logger_cache->getOrCreateLogger( + config->common_config(), Common::GrpcAccessLoggerType::TCP)); + }); } void TcpGrpcAccessLog::emitLog(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, @@ -37,7 +38,7 @@ void TcpGrpcAccessLog::emitLog(const Http::RequestHeaderMap&, const Http::Respon // 
Common log properties. envoy::data::accesslog::v3::TCPAccessLogEntry log_entry; GrpcCommon::Utility::extractCommonAccessLogProperties(*log_entry.mutable_common_properties(), - stream_info, config_.common_config()); + stream_info, config_->common_config()); envoy::data::accesslog::v3::ConnectionProperties& connection_properties = *log_entry.mutable_connection_properties(); diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h index fba13f16d6c6d..897091d0367ad 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h @@ -20,16 +20,17 @@ namespace TcpGrpc { // TODO(mattklein123): Stats +using envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig; +using TcpGrpcAccessLogConfigConstSharedPtr = std::shared_ptr; + /** * Access log Instance that streams TCP logs over gRPC. */ class TcpGrpcAccessLog : public Common::ImplBase { public: - TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, + TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, const TcpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); private: /** @@ -47,8 +48,7 @@ class TcpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; - Stats::Scope& scope_; - const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config_; + const TcpGrpcAccessLogConfigConstSharedPtr config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; }; diff --git a/source/extensions/access_loggers/open_telemetry/BUILD 
b/source/extensions/access_loggers/open_telemetry/BUILD index c500ee1aea1f3..1599f25a4c9c9 100644 --- a/source/extensions/access_loggers/open_telemetry/BUILD +++ b/source/extensions/access_loggers/open_telemetry/BUILD @@ -23,7 +23,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/extensions/access_loggers/common:grpc_access_logger", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", "@opentelemetry_proto//:logs_cc_proto", ], ) @@ -42,7 +42,7 @@ envoy_cc_library( "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", "@opentelemetry_proto//:logs_cc_proto", ], ) @@ -61,11 +61,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up. 
- extra_visibility = [ - "//test/common/access_log:__subpackages__", - "//test/integration:__subpackages__", - ], deps = [ "//envoy/server:access_log_config_interface", "//source/common/common:assert_lib", @@ -74,6 +69,6 @@ envoy_cc_extension( "//source/extensions/access_loggers/open_telemetry:access_log_lib", "//source/extensions/access_loggers/open_telemetry:access_log_proto_descriptors_lib", "//source/extensions/access_loggers/open_telemetry:grpc_access_log_lib", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/access_loggers/open_telemetry/access_log_impl.cc b/source/extensions/access_loggers/open_telemetry/access_log_impl.cc index 48c4166395daa..38b6a5d644b93 100644 --- a/source/extensions/access_loggers/open_telemetry/access_log_impl.cc +++ b/source/extensions/access_loggers/open_telemetry/access_log_impl.cc @@ -5,7 +5,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/data/accesslog/v3/accesslog.pb.h" #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "source/common/common/assert.h" #include "source/common/config/utility.h" @@ -34,16 +34,15 @@ AccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcAccessLoggerSharedPtr logger AccessLog::AccessLog( ::Envoy::AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), tls_slot_(tls.allocateSlot()), + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, 
GrpcAccessLoggerCacheSharedPtr access_logger_cache) + : Common::ImplBase(std::move(filter)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { Envoy::Config::Utility::checkTransportVersion(config.common_config()); tls_slot_->set([this, config](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config.common_config(), Common::GrpcAccessLoggerType::HTTP, scope_)); + config.common_config(), Common::GrpcAccessLoggerType::HTTP)); }); ProtobufWkt::Struct body_format; diff --git a/source/extensions/access_loggers/open_telemetry/access_log_impl.h b/source/extensions/access_loggers/open_telemetry/access_log_impl.h index 1bd6b34804c8b..7a36bad2639e0 100644 --- a/source/extensions/access_loggers/open_telemetry/access_log_impl.h +++ b/source/extensions/access_loggers/open_telemetry/access_log_impl.h @@ -4,7 +4,7 @@ #include #include "envoy/access_log/access_log.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/grpc/async_client.h" #include "envoy/grpc/async_client_manager.h" #include "envoy/local_info/local_info.h" @@ -33,11 +33,10 @@ namespace OpenTelemetry { */ class AccessLog : public Common::ImplBase { public: - AccessLog(::Envoy::AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig - config, - ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope); + AccessLog( + ::Envoy::AccessLog::FilterPtr&& filter, + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache); private: /** @@ -55,7 +54,6 @@ class AccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) 
override; - Stats::Scope& scope_; const ThreadLocal::SlotPtr tls_slot_; const GrpcAccessLoggerCacheSharedPtr access_logger_cache_; std::unique_ptr body_formatter_; diff --git a/source/extensions/access_loggers/open_telemetry/config.cc b/source/extensions/access_loggers/open_telemetry/config.cc index 2b0f02f3acd2b..f23640456c3bc 100644 --- a/source/extensions/access_loggers/open_telemetry/config.cc +++ b/source/extensions/access_loggers/open_telemetry/config.cc @@ -1,7 +1,7 @@ #include "source/extensions/access_loggers/open_telemetry/config.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.validate.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.validate.h" #include "envoy/registry/registry.h" #include "envoy/server/access_log_config.h" #include "envoy/server/filter_config.h" @@ -37,18 +37,17 @@ AccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, Server::Configuration::CommonFactoryContext& context) { validateProtoDescriptors(); - const auto& proto_config = - MessageUtil::downcastAndValidate( - config, context.messageValidationVisitor()); + const auto& proto_config = MessageUtil::downcastAndValidate< + const envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig&>( + config, context.messageValidationVisitor()); return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - getAccessLoggerCacheSingleton(context), context.scope()); + getAccessLoggerCacheSingleton(context)); } ProtobufTypes::MessagePtr AccessLogFactory::createEmptyConfigProto() { return std::make_unique< - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig>(); + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig>(); } std::string 
AccessLogFactory::name() const { return "envoy.access_loggers.open_telemetry"; } diff --git a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc index 215f7cfba9e4e..38d9616922a91 100644 --- a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc @@ -1,7 +1,7 @@ #include "source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h" #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/grpc/async_client_manager.h" #include "envoy/local_info/local_info.h" @@ -76,10 +76,10 @@ GrpcAccessLoggerImpl::SharedPtr GrpcAccessLoggerCacheImpl::createLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) { + Event::Dispatcher& dispatcher) { return std::make_shared(client, config.log_name(), buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, local_info_, scope); + dispatcher, local_info_, scope_); } } // namespace OpenTelemetry diff --git a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h index 7af83f529de4c..85aa0ad8d6943 100644 --- a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h @@ -68,7 +68,7 @@ class GrpcAccessLoggerCacheImpl createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, 
std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) override; + Event::Dispatcher& dispatcher) override; const LocalInfo::LocalInfo& local_info_; }; diff --git a/source/extensions/access_loggers/wasm/config.cc b/source/extensions/access_loggers/wasm/config.cc index d200351dc227d..467398b7369ac 100644 --- a/source/extensions/access_loggers/wasm/config.cc +++ b/source/extensions/access_loggers/wasm/config.cc @@ -41,13 +41,15 @@ AccessLog::InstanceSharedPtr WasmAccessLogFactory::createAccessLogInstance( }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm access log {}", plugin->name_)); } + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); return access_log; } diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index 761c3c75e0bcc..e02b98e25030b 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -4,7 +4,6 @@ load("@envoy_build_config//:extensions_build_config.bzl", "EXTENSIONS") # These extensions are registered using the extension system but are required for the core Envoy build. # The map may be overridden by extensions specified in envoy_build_config. 
_required_extensions = { - "envoy.common.crypto.utility_lib": "//source/extensions/common/crypto:utility_lib", "envoy.http.original_ip_detection.xff": "//source/extensions/http/original_ip_detection/xff:config", "envoy.request_id.uuid": "//source/extensions/request_id/uuid:config", "envoy.transport_sockets.tls": "//source/extensions/transport_sockets/tls:config", diff --git a/source/extensions/bootstrap/wasm/config.cc b/source/extensions/bootstrap/wasm/config.cc index f2771f596814c..4f34f7e88dab4 100644 --- a/source/extensions/bootstrap/wasm/config.cc +++ b/source/extensions/bootstrap/wasm/config.cc @@ -32,8 +32,8 @@ void WasmServiceExtension::createWasm(Server::Configuration::ServerFactoryContex if (config_.singleton()) { // Return a Wasm VM which will be stored as a singleton by the Server. wasm_service_ = std::make_unique( - plugin, - Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, context.dispatcher())); + plugin, Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, + context.mainThreadDispatcher())); return; } // Per-thread WASM VM. @@ -49,8 +49,8 @@ void WasmServiceExtension::createWasm(Server::Configuration::ServerFactoryContex }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { // NB: throw if we get a synchronous configuration failures as this is how such failures are // reported to xDS. 
@@ -65,7 +65,8 @@ WasmFactory::createBootstrapExtension(const Protobuf::Message& config, auto typed_config = MessageUtil::downcastAndValidate( config, context.messageValidationContext().staticValidationVisitor()); - + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); return std::make_unique(typed_config, context); } diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index c7f21b5abdbd3..6e6b7ab77cab3 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -19,7 +19,7 @@ Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, factory_context.mainThreadDispatcher().timeSource()), cluster_manager_(cluster_manager), runtime_(runtime), random_(random), clusters_(std::make_shared(config.clusters().begin(), config.clusters().end())) {} diff --git a/source/extensions/clusters/dynamic_forward_proxy/BUILD b/source/extensions/clusters/dynamic_forward_proxy/BUILD index 44da3c9be55d9..69010672a91e1 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/source/extensions/clusters/dynamic_forward_proxy/BUILD @@ -18,6 +18,7 @@ envoy_cc_extension( "//source/common/upstream:logical_host_lib", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", "//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl", + "//source/extensions/filters/network/common:utility_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg_cc_proto", diff --git 
a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index cc72fc392466a..62209fbd8b98c 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -22,7 +22,8 @@ Cluster::Cluster( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, + factory_context.mainThreadDispatcher().timeSource()), dns_cache_manager_(cache_manager_factory.get()), dns_cache_(dns_cache_manager_->getCache(config.dns_cache_config())), update_callbacks_handle_(dns_cache_->addUpdateCallbacks(*this)), local_info_(local_info) {} @@ -177,8 +178,7 @@ ClusterFactory::createClusterWithConfig( Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.tls(), context.api(), - context.runtime(), context.stats(), context.messageValidationVisitor()); + context); envoy::config::cluster::v3::Cluster cluster_config = cluster; if (!cluster_config.has_upstream_http_protocol_options()) { // This sets defaults which will only apply if using old style http config. 
diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 59d6f7ad0d3ca..e497a62a9ccf7 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -25,7 +25,8 @@ RedisCluster::RedisCluster( Stats::ScopePtr&& stats_scope, bool added_via_api, ClusterSlotUpdateCallBackSharedPtr lb_factory) : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, + factory_context.mainThreadDispatcher().timeSource()), cluster_manager_(cluster_manager), cluster_refresh_rate_(std::chrono::milliseconds( PROTOBUF_GET_MS_OR_DEFAULT(redis_cluster, cluster_refresh_rate, 5000))), @@ -37,7 +38,7 @@ RedisCluster::RedisCluster( PROTOBUF_GET_WRAPPED_OR_DEFAULT(redis_cluster, redirect_refresh_threshold, 5)), failure_refresh_threshold_(redis_cluster.failure_refresh_threshold()), host_degraded_refresh_threshold_(redis_cluster.host_degraded_refresh_threshold()), - dispatcher_(factory_context.dispatcher()), dns_resolver_(std::move(dns_resolver)), + dispatcher_(factory_context.mainThreadDispatcher()), dns_resolver_(std::move(dns_resolver)), dns_lookup_family_(Upstream::getDnsLookupFamilyFromCluster(cluster)), load_assignment_(cluster.load_assignment()), local_info_(factory_context.localInfo()), random_(api.randomGenerator()), redis_discovery_session_(*this, redis_client_factory), @@ -48,7 +49,7 @@ RedisCluster::RedisCluster( NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(info(), api)), cluster_name_(cluster.name()), refresh_manager_(Common::Redis::getClusterRefreshManager( - factory_context.singletonManager(), factory_context.dispatcher(), + factory_context.singletonManager(), factory_context.mainThreadDispatcher(), factory_context.clusterManager(), factory_context.api().timeSource())), registration_handle_(refresh_manager_->registerCluster( 
cluster_name_, redirect_refresh_interval_, redirect_refresh_threshold_, diff --git a/source/extensions/common/aws/BUILD b/source/extensions/common/aws/BUILD index 8cdd3fc95a72d..f8857b60a5531 100644 --- a/source/extensions/common/aws/BUILD +++ b/source/extensions/common/aws/BUILD @@ -31,7 +31,6 @@ envoy_cc_library( "//source/common/crypto:utility_lib", "//source/common/http:headers_lib", "//source/common/singleton:const_singleton", - "//source/extensions/common/crypto:utility_lib", ], ) diff --git a/source/extensions/common/crypto/BUILD b/source/extensions/common/crypto/BUILD deleted file mode 100644 index 1896816e5e960..0000000000000 --- a/source/extensions/common/crypto/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", - "envoy_extension_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_extension_package() - -envoy_cc_extension( - name = "utility_lib", - srcs = [ - "crypto_impl.cc", - "utility_impl.cc", - ], - hdrs = [ - "crypto_impl.h", - "utility_impl.h", - ], - external_deps = [ - "ssl", - ], - # Legacy test use. TODO(#9953) clean up. 
- extra_visibility = [ - "//test/common/config:__subpackages__", - "//test/common/crypto:__subpackages__", - ], - deps = [ - "//envoy/buffer:buffer_interface", - "//source/common/common:assert_lib", - "//source/common/crypto:utility_lib", - ], -) diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index f7daecf1ab23b..c37625350843f 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -29,6 +29,7 @@ envoy_cc_library( deps = [ ":dns_cache_impl", "//source/common/protobuf", + "//source/server:factory_context_base_impl_lib", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -43,11 +44,11 @@ envoy_cc_library( "//envoy/network:dns_interface", "//envoy/thread_local:thread_local_interface", "//source/common/common:cleanup_lib", + "//source/common/common:dns_utils_lib", "//source/common/common:key_value_store_lib", "//source/common/config:utility_lib", "//source/common/network:resolver_lib", "//source/common/network:utility_lib", - "//source/common/upstream:upstream_lib", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 218c1044a0793..4500341592e71 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -197,6 +197,12 @@ class DnsCache { * @return RAII handle for pending request circuit breaker if the request was allowed. */ virtual Upstream::ResourceAutoIncDecPtr canCreateDnsRequest() PURE; + + /** + * Force a DNS refresh of all known hosts, ignoring any ongoing failure or success timers. This + * can be used in response to network changes which might alter DNS responses, for example. 
+ */ + virtual void forceRefreshHosts() PURE; }; using DnsCacheSharedPtr = std::shared_ptr; @@ -215,6 +221,14 @@ class DnsCacheManager { */ virtual DnsCacheSharedPtr getCache(const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) PURE; + + /** + * Look up an existing DNS cache by name. + * @param name supplies the cache name to look up. If a cache exists with the same name it + * will be returned. + * @return pointer to the cache if it exists, nullptr otherwise. + */ + virtual DnsCacheSharedPtr lookUpCacheByName(absl::string_view cache_name) PURE; }; using DnsCacheManagerSharedPtr = std::shared_ptr; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 604004c565cdf..d276202eb1cb4 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -2,38 +2,37 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "source/common/common/dns_utils.h" #include "source/common/common/stl_helpers.h" #include "source/common/config/utility.h" #include "source/common/http/utility.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/utility.h" -// TODO(mattklein123): Move DNS family helpers to a smaller include. 
-#include "source/common/upstream/upstream_impl.h" - namespace Envoy { namespace Extensions { namespace Common { namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( - Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Filesystem::Instance& file_system, Runtime::Loader& loader, - Stats::Scope& root_scope, ProtobufMessage::ValidationVisitor& validation_visitor, + Server::Configuration::FactoryContextBase& context, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) - : main_thread_dispatcher_(main_thread_dispatcher), - dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), - resolver_(selectDnsResolver(config, main_thread_dispatcher)), tls_slot_(tls), - scope_(root_scope.createScope(fmt::format("dns_cache.{}.", config.name()))), + : main_thread_dispatcher_(context.mainThreadDispatcher()), + dns_lookup_family_(DnsUtils::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), + resolver_(selectDnsResolver(config, main_thread_dispatcher_)), + tls_slot_(context.threadLocal()), + scope_(context.scope().createScope(fmt::format("dns_cache.{}.", config.name()))), stats_(generateDnsCacheStats(*scope_)), - resource_manager_(*scope_, loader, config.name(), config.dns_cache_circuit_breaker()), + resource_manager_(*scope_, context.runtime(), config.name(), + config.dns_cache_circuit_breaker()), refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)), timeout_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_query_timeout, 5000)), failure_backoff_strategy_( Config::Utility::prepareDnsRefreshStrategy< envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>( - config, refresh_interval_.count(), random)), - file_system_(file_system), validation_visitor_(validation_visitor), + config, refresh_interval_.count(), context.api().randomGenerator())), + file_system_(context.api().fileSystem()), + 
validation_visitor_(context.messageValidationVisitor()), host_ttl_(PROTOBUF_GET_MS_OR_DEFAULT(config, host_ttl, 300000)), max_hosts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_hosts, 1024)) { tls_slot_.set([&](Event::Dispatcher&) { return std::make_shared(*this); }); @@ -53,10 +52,7 @@ DnsCacheImpl::DnsCacheImpl( // cache to load an entry. Further if this particular resolution fails all the is lost is the // potential optimization of having the entry be preresolved the first time a true consumer of // this DNS cache asks for it. - main_thread_dispatcher_.post( - [this, host = hostname.address(), default_port = hostname.port_value()]() { - startCacheLoad(host, default_port); - }); + startCacheLoad(hostname.address(), hostname.port_value()); } } @@ -194,25 +190,28 @@ void DnsCacheImpl::startCacheLoad(const std::string& host, uint16_t default_port return; } - const auto host_attributes = Http::Utility::parseAuthority(host); + primary_host = createHost(host, default_port); + startResolve(host, *primary_host); +} +DnsCacheImpl::PrimaryHostInfo* DnsCacheImpl::createHost(const std::string& host, + uint16_t default_port) { + const auto host_attributes = Http::Utility::parseAuthority(host); // TODO(mattklein123): Right now, the same host with different ports will become two // independent primary hosts with independent DNS resolutions. I'm not sure how much this will // matter, but we could consider collapsing these down and sharing the underlying DNS resolution. { absl::WriterMutexLock writer_lock{&primary_hosts_lock_}; - primary_host = primary_hosts_ - // try_emplace() is used here for direct argument forwarding. 
- .try_emplace(host, std::make_unique( - *this, std::string(host_attributes.host_), - host_attributes.port_.value_or(default_port), - host_attributes.is_ip_address_, - [this, host]() { onReResolve(host); }, - [this, host]() { onResolveTimeout(host); })) - .first->second.get(); + return primary_hosts_ + // try_emplace() is used here for direct argument forwarding. + .try_emplace(host, + std::make_unique( + *this, std::string(host_attributes.host_), + host_attributes.port_.value_or(default_port), + host_attributes.is_ip_address_, [this, host]() { onReResolve(host); }, + [this, host]() { onResolveTimeout(host); })) + .first->second.get(); } - - startResolve(host, *primary_host); } DnsCacheImpl::PrimaryHostInfo& DnsCacheImpl::getPrimaryHost(const std::string& host) { @@ -229,7 +228,7 @@ void DnsCacheImpl::onResolveTimeout(const std::string& host) { ASSERT(main_thread_dispatcher_.isThreadSafe()); auto& primary_host = getPrimaryHost(host); - ENVOY_LOG(debug, "host='{}' resolution timeout", host); + ENVOY_LOG_EVENT(debug, "dns_cache_resolve_timeout", "host='{}' resolution timeout", host); stats_.dns_query_timeout_.inc(); primary_host.active_query_->cancel(Network::ActiveDnsQuery::CancelReason::Timeout); finishResolve(host, Network::DnsResolver::ResolutionStatus::Failure, {}); @@ -270,6 +269,23 @@ void DnsCacheImpl::onReResolve(const std::string& host) { } } +void DnsCacheImpl::forceRefreshHosts() { + absl::ReaderMutexLock reader_lock{&primary_hosts_lock_}; + for (auto& primary_host : primary_hosts_) { + // Avoid holding the lock for longer than necessary by just triggering the refresh timer for + // each host IFF the host is not already refreshing. + // TODO(mattklein123): In the future we may want to cancel an ongoing refresh and start a new + // one to avoid a situation in which an older refresh races with a concurrent network change, + // for example. 
+ if (primary_host.second->active_query_ == nullptr) { + ASSERT(!primary_host.second->timeout_timer_->enabled()); + primary_host.second->refresh_timer_->enableTimer(std::chrono::milliseconds(0), nullptr); + ENVOY_LOG_EVENT(debug, "force_refresh_host", "force refreshing host='{}'", + primary_host.first); + } + } +} + void DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_info) { ENVOY_LOG(debug, "starting main thread resolve for host='{}' dns='{}' port='{}'", host, host_info.host_info_->resolvedHost(), host_info.port_); @@ -288,13 +304,15 @@ void DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_i void DnsCacheImpl::finishResolve(const std::string& host, Network::DnsResolver::ResolutionStatus status, - std::list&& response, bool from_cache) { + std::list&& response, + absl::optional resolution_time) { ASSERT(main_thread_dispatcher_.isThreadSafe()); ENVOY_LOG_EVENT(debug, "dns_cache_finish_resolve", "main thread resolve complete for host '{}': {}", host, accumulateToString(response, [](const auto& dns_response) { return dns_response.address_->asString(); })); + const bool from_cache = resolution_time.has_value(); // Functions like this one that modify primary_hosts_ are only called in the main thread so we // know it is safe to use the PrimaryHostInfo pointers outside of the lock. 
@@ -305,9 +323,19 @@ void DnsCacheImpl::finishResolve(const std::string& host, return primary_host_it->second.get(); }(); - const bool first_resolve = !primary_host_info->host_info_->firstResolveComplete(); - primary_host_info->timeout_timer_->disableTimer(); - primary_host_info->active_query_ = nullptr; + bool first_resolve = false; + + if (!from_cache) { + first_resolve = !primary_host_info->host_info_->firstResolveComplete(); + primary_host_info->timeout_timer_->disableTimer(); + primary_host_info->active_query_ = nullptr; + + if (status == Network::DnsResolver::ResolutionStatus::Failure) { + stats_.dns_query_failure_.inc(); + } else { + stats_.dns_query_success_.inc(); + } + } // If the DNS resolver successfully resolved with an empty response list, the dns cache does not // update. This ensures that a potentially previously resolved address does not stabilize back to @@ -317,12 +345,6 @@ void DnsCacheImpl::finishResolve(const std::string& host, primary_host_info->port_) : nullptr; - if (status == Network::DnsResolver::ResolutionStatus::Failure) { - stats_.dns_query_failure_.inc(); - } else { - stats_.dns_query_success_.inc(); - } - // Only the change the address if: // 1) The new address is valid && // 2a) The host doesn't yet have an address || @@ -333,11 +355,6 @@ void DnsCacheImpl::finishResolve(const std::string& host, bool address_changed = false; auto current_address = primary_host_info->host_info_->address(); if (new_address != nullptr && (current_address == nullptr || *current_address != *new_address)) { - if (!from_cache) { - addCacheEntry(host, new_address); - } - // TODO(alyssawilk) don't immediately push cached entries to threads. - // Only serve stale entries if a configured resolve timeout has fired. 
ENVOY_LOG(debug, "host '{}' address has changed", host); primary_host_info->host_info_->setAddress(new_address); runAddUpdateCallbacks(host, primary_host_info->host_info_); @@ -345,14 +362,30 @@ void DnsCacheImpl::finishResolve(const std::string& host, stats_.host_address_changed_.inc(); } - if (first_resolve || address_changed) { + if (!resolution_time.has_value()) { + resolution_time = main_thread_dispatcher_.timeSource().monotonicTime(); + } + if (new_address) { + // Update the cache entry and staleness any time the ttl changes. + if (!from_cache) { + addCacheEntry(host, new_address, response.front().ttl_); + } + primary_host_info->host_info_->updateStale(resolution_time.value(), response.front().ttl_); + } + + if (first_resolve) { primary_host_info->host_info_->setFirstResolveComplete(); + } + if (first_resolve || address_changed) { + // TODO(alyssawilk) only notify threads of stale results after a resolution + // timeout. notifyThreads(host, primary_host_info->host_info_); } // Kick off the refresh timer. // TODO(mattklein123): Consider jitter here. It may not be necessary since the initial host // is populated dynamically. + // TODO(alyssawilk) also consider TTL here. if (status == Network::DnsResolver::ResolutionStatus::Success) { failure_backoff_strategy_->reset(); primary_host_info->refresh_timer_->enableTimer(refresh_interval_); @@ -429,12 +462,16 @@ DnsCacheImpl::PrimaryHostInfo::~PrimaryHostInfo() { } void DnsCacheImpl::addCacheEntry(const std::string& host, - const Network::Address::InstanceConstSharedPtr& address) { + const Network::Address::InstanceConstSharedPtr& address, + const std::chrono::seconds ttl) { if (!key_value_store_) { return; } - // TODO(alyssawilk) cache data should include TTL, or some other indicator. 
- const std::string value = absl::StrCat(address->asString()); + MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime(); + uint64_t seconds_since_epoch = + std::chrono::duration_cast(now.time_since_epoch()).count(); + const std::string value = + absl::StrCat(address->asString(), "|", ttl.count(), "|", seconds_since_epoch); key_value_store_->addOrUpdate(host, value); } @@ -455,18 +492,42 @@ void DnsCacheImpl::loadCacheEntries( key_value_store_ = factory.createStore(config.key_value_config(), validation_visitor_, main_thread_dispatcher_, file_system_); KeyValueStore::ConstIterateCb load = [this](const std::string& key, const std::string& value) { - auto address = Network::Utility::parseInternetAddressAndPortNoThrow(value); - if (address == nullptr) { + Network::Address::InstanceConstSharedPtr address; + const auto parts = StringUtil::splitToken(value, "|"); + std::chrono::seconds ttl(0); + absl::optional resolution_time; + if (parts.size() == 3) { + address = Network::Utility::parseInternetAddressAndPortNoThrow(std::string(parts[0])); + if (address == nullptr) { + ENVOY_LOG(warn, "{} is not a valid address", parts[0]); + } + uint64_t ttl_int; + if (absl::SimpleAtoi(parts[1], &ttl_int) && ttl_int != 0) { + ttl = std::chrono::seconds(ttl_int); + } else { + ENVOY_LOG(warn, "{} is not a valid ttl", parts[1]); + } + uint64_t epoch_int; + if (absl::SimpleAtoi(parts[2], &epoch_int)) { + MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime(); + const std::chrono::seconds seconds_since_epoch = + std::chrono::duration_cast(now.time_since_epoch()); + resolution_time = main_thread_dispatcher_.timeSource().monotonicTime() - + (seconds_since_epoch - std::chrono::seconds(epoch_int)); + } + } else { + ENVOY_LOG(warn, "Incorrect number of tokens in the cache line"); + } + if (address == nullptr || ttl == std::chrono::seconds(0) || !resolution_time.has_value()) { ENVOY_LOG(warn, "Unable to parse cache line '{}'", value); return 
KeyValueStore::Iterate::Break; } stats_.cache_load_.inc(); std::list response; - // TODO(alyssawilk) change finishResolve to actually use the TTL rather than - // putting 0 here, return the remaining TTL or indicate the result is stale. - response.emplace_back(Network::DnsResponse(address, std::chrono::seconds(0) /* ttl */)); - startCacheLoad(key, address->ip()->port()); - finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(response), true); + createHost(key, address->ip()->port()); + response.emplace_back(Network::DnsResponse(address, ttl)); + finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(response), + resolution_time); return KeyValueStore::Iterate::Continue; }; key_value_store_->iterate(load); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 28614a0181736..d10c88bd4feb2 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -5,6 +5,7 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" #include "envoy/http/filter.h" #include "envoy/network/dns.h" +#include "envoy/server/factory_context.h" #include "envoy/thread_local/thread_local.h" #include "source/common/common/cleanup.h" @@ -45,10 +46,7 @@ class DnsCacheImplTest; class DnsCacheImpl : public DnsCache, Logger::Loggable { public: - DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Filesystem::Instance& file_system, - Runtime::Loader& loader, Stats::Scope& root_scope, - ProtobufMessage::ValidationVisitor& validation_visitor, + DnsCacheImpl(Server::Configuration::FactoryContextBase& context, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); ~DnsCacheImpl() override; static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope); @@ 
-63,6 +61,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable getHost(absl::string_view host_name) override; Upstream::ResourceAutoIncDecPtr canCreateDnsRequest() override; + void forceRefreshHosts() override; private: struct LoadDnsCacheEntryHandleImpl @@ -100,7 +99,8 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable last_used_time_; + std::atomic stale_at_time_; bool first_resolve_complete_ ABSL_GUARDED_BY(resolve_lock_){false}; }; @@ -177,7 +181,8 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable&& response, bool from_cache = false); + std::list&& response, + absl::optional resolution_time = {}); void runAddUpdateCallbacks(const std::string& host, const DnsHostInfoSharedPtr& host_info); void runRemoveCallbacks(const std::string& host); void notifyThreads(const std::string& host, const DnsHostInfoImplSharedPtr& resolved_info); @@ -186,10 +191,12 @@ class DnsCacheImpl : public DnsCache, Logger::Loggablesecond.cache_; } - DnsCacheSharedPtr new_cache = - std::make_shared(main_thread_dispatcher_, tls_, random_, file_system_, loader_, - root_scope_, validation_visitor_, config); + DnsCacheSharedPtr new_cache = std::make_shared(context_, config); caches_.emplace(config.name(), ActiveCache{config, new_cache}); return new_cache; } +DnsCacheSharedPtr DnsCacheManagerImpl::lookUpCacheByName(absl::string_view cache_name) { + ASSERT(context_.mainThreadDispatcher().isThreadSafe()); + const auto& existing_cache = caches_.find(cache_name); + if (existing_cache != caches_.end()) { + return existing_cache->second.cache_; + } + + return nullptr; +} + DnsCacheManagerSharedPtr DnsCacheManagerFactoryImpl::get() { - return singleton_manager_.getTyped( - SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), [this] { - return std::make_shared(dispatcher_, tls_, random_, file_system_, - loader_, root_scope_, validation_visitor_); - }); + return context_.singletonManager().getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), + [this] { return 
std::make_shared(context_); }); } } // namespace DynamicForwardProxy diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 4b27404366a8c..9ec6c434e4688 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -1,8 +1,10 @@ #pragma once #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/server/factory_context.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache.h" +#include "source/server/factory_context_base_impl.h" #include "absl/container/flat_hash_map.h" @@ -13,17 +15,12 @@ namespace DynamicForwardProxy { class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { public: - DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Filesystem::Instance& file_system, - Runtime::Loader& loader, Stats::Scope& root_scope, - ProtobufMessage::ValidationVisitor& validation_visitor) - : main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random), - file_system_(file_system), loader_(loader), root_scope_(root_scope), - validation_visitor_(validation_visitor) {} + DnsCacheManagerImpl(Server::Configuration::FactoryContextBase& context) : context_(context) {} // DnsCacheManager DnsCacheSharedPtr getCache( const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) override; + DnsCacheSharedPtr lookUpCacheByName(absl::string_view cache_name) override; private: struct ActiveCache { @@ -35,38 +32,19 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { DnsCacheSharedPtr cache_; }; - Event::Dispatcher& main_thread_dispatcher_; - ThreadLocal::SlotAllocator& tls_; - Random::RandomGenerator& random_; - Filesystem::Instance& file_system_; - 
Runtime::Loader& loader_; - Stats::Scope& root_scope_; - ProtobufMessage::ValidationVisitor& validation_visitor_; - + Server::FactoryContextBaseImpl context_; absl::flat_hash_map caches_; }; class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { public: - DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls, Api::Api& api, - Runtime::Loader& loader, Stats::Scope& root_scope, - ProtobufMessage::ValidationVisitor& validation_visitor) - : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), - random_(api.randomGenerator()), file_system_(api.fileSystem()), loader_(loader), - root_scope_(root_scope), validation_visitor_(validation_visitor) {} + DnsCacheManagerFactoryImpl(Server::Configuration::FactoryContextBase& context) + : context_(context) {} DnsCacheManagerSharedPtr get() override; private: - Singleton::Manager& singleton_manager_; - Event::Dispatcher& dispatcher_; - ThreadLocal::SlotAllocator& tls_; - Random::RandomGenerator& random_; - Filesystem::Instance& file_system_; - Runtime::Loader& loader_; - Stats::Scope& root_scope_; - ProtobufMessage::ValidationVisitor& validation_visitor_; + Server::FactoryContextBaseImpl context_; }; } // namespace DynamicForwardProxy diff --git a/source/extensions/common/utility.h b/source/extensions/common/utility.h index 7b326227222ec..8e5d5a62f83b9 100644 --- a/source/extensions/common/utility.h +++ b/source/extensions/common/utility.h @@ -32,12 +32,9 @@ class ExtensionNameUtil { UNREFERENCED_PARAMETER(runtime); return Status::Block; #else - bool warn_only = true; - - if (runtime && !runtime->snapshot().deprecatedFeatureEnabled( - "envoy.deprecated_features.allow_deprecated_extension_names", true)) { - warn_only = false; - } + const bool warn_only = + runtime && runtime->snapshot().deprecatedFeatureEnabled( + "envoy.deprecated_features.allow_deprecated_extension_names", false); return warn_only ? 
Status::Warn : Status::Block; #endif diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 6612ca0b49160..49cf9725d091e 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( "//envoy/http:codes_interface", "//envoy/http:filter_interface", "//envoy/server:lifecycle_notifier_interface", + "//envoy/stats:custom_stat_namespaces_interface", "//envoy/thread_local:thread_local_object", "//envoy/upstream:cluster_manager_interface", "//source/common/config:datasource_lib", diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index 41cd1b7a84d81..07447110f2106 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -1237,23 +1237,30 @@ WasmResult Context::defineMetric(uint32_t metric_type, std::string_view name, // TODO: Consider rethinking the scoping policy as it does not help in this case. Stats::StatNameManagedStorage storage(toAbslStringView(name), wasm()->scope_->symbolTable()); Stats::StatName stat_name = storage.statName(); + // We prefix the given name with custom_stat_name_ so that these user-defined + // custom metrics can be distinguished from native Envoy metrics. 
if (type == MetricType::Counter) { auto id = wasm()->nextCounterMetricId(); - auto c = &wasm()->scope_->counterFromStatName(stat_name); + Stats::Counter* c = &Stats::Utility::counterFromElements( + *wasm()->scope_, {wasm()->custom_stat_namespace_, stat_name}); wasm()->counters_.emplace(id, c); *metric_id_ptr = id; return WasmResult::Ok; } if (type == MetricType::Gauge) { auto id = wasm()->nextGaugeMetricId(); - auto g = &wasm()->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate); + Stats::Gauge* g = &Stats::Utility::gaugeFromStatNames( + *wasm()->scope_, {wasm()->custom_stat_namespace_, stat_name}, + Stats::Gauge::ImportMode::Accumulate); wasm()->gauges_.emplace(id, g); *metric_id_ptr = id; return WasmResult::Ok; } // (type == MetricType::Histogram) { auto id = wasm()->nextHistogramMetricId(); - auto h = &wasm()->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified); + Stats::Histogram* h = &Stats::Utility::histogramFromStatNames( + *wasm()->scope_, {wasm()->custom_stat_namespace_, stat_name}, + Stats::Histogram::Unit::Unspecified); wasm()->histograms_.emplace(id, h); *metric_id_ptr = id; return WasmResult::Ok; diff --git a/source/extensions/common/wasm/stats_handler.h b/source/extensions/common/wasm/stats_handler.h index f196f071e17a1..ad03f3c118209 100644 --- a/source/extensions/common/wasm/stats_handler.h +++ b/source/extensions/common/wasm/stats_handler.h @@ -15,6 +15,10 @@ namespace Extensions { namespace Common { namespace Wasm { +// The custom stat namespace which prepends all the user-defined metrics. +// Note that the prefix is removed from the final output of /stats endpoints. 
+constexpr absl::string_view CustomStatNamespace = "wasmcustom"; + #define CREATE_WASM_STATS(COUNTER, GAUGE) \ COUNTER(remote_load_cache_hits) \ COUNTER(remote_load_cache_negative_hits) \ diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index 290890f83dd4b..5afbb8a898014 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -78,7 +78,9 @@ Wasm::Wasm(WasmConfig& config, absl::string_view vm_key, const Stats::ScopeShare createWasmVm(config.config().vm_config().runtime()), config.config().vm_config().vm_id(), MessageUtil::anyToBytes(config.config().vm_config().configuration()), toStdStringView(vm_key), config.environmentVariables(), config.allowedCapabilities()), - scope_(scope), cluster_manager_(cluster_manager), dispatcher_(dispatcher), + scope_(scope), stat_name_pool_(scope_->symbolTable()), + custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), + cluster_manager_(cluster_manager), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), lifecycle_stats_handler_(LifecycleStatsHandler( scope, config.config().vm_config().runtime())) { lifecycle_stats_handler_.onEvent(WasmEvent::VmCreated); @@ -92,7 +94,8 @@ Wasm::Wasm(WasmHandleSharedPtr base_wasm_handle, Event::Dispatcher& dispatcher) "envoy.wasm.runtime.", toAbslStringView(base_wasm_handle->wasm()->wasm_vm()->runtime()))); }), - scope_(getWasm(base_wasm_handle)->scope_), + scope_(getWasm(base_wasm_handle)->scope_), stat_name_pool_(scope_->symbolTable()), + custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), cluster_manager_(getWasm(base_wasm_handle)->clusterManager()), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), lifecycle_stats_handler_(getWasm(base_wasm_handle)->lifecycle_stats_handler_) { diff --git a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h index 82e6c29a5f418..3ef156ca77f72 100644 --- a/source/extensions/common/wasm/wasm.h +++ 
b/source/extensions/common/wasm/wasm.h @@ -98,6 +98,8 @@ class Wasm : public WasmBase, Logger::Loggable { proxy_wasm::WasmCallVoid<2> on_stats_update_; Stats::ScopeSharedPtr scope_; + Stats::StatNamePool stat_name_pool_; + const Stats::StatName custom_stat_namespace_; Upstream::ClusterManager& cluster_manager_; Event::Dispatcher& dispatcher_; Event::PostCb server_shutdown_post_cb_; diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 907828f07f8c6..7e220b0a4a725 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -297,9 +297,16 @@ EXTENSIONS = { # "envoy.key_value.file_based": "//source/extensions/key_value/file_based:config_lib", + + # + # RBAC matchers + # + + "envoy.rbac.matchers.upstream_ip_port": "//source/extensions/filters/common/rbac/matchers:upstream_ip_port_lib", } # These can be changed to ["//visibility:public"], for downstream builds which # need to directly reference Envoy extensions. 
EXTENSION_CONFIG_VISIBILITY = ["//:extension_config"] EXTENSION_PACKAGE_VISIBILITY = ["//:extension_library"] +CONTRIB_EXTENSION_PACKAGE_VISIBILITY = ["//:contrib_library"] diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index c8ad920b51e17..d8cec006bd6dd 100644 --- a/source/extensions/extensions_metadata.yaml +++ b/source/extensions/extensions_metadata.yaml @@ -704,3 +704,8 @@ envoy.key_value.file_based: - envoy.common.key_value security_posture: data_plane_agnostic status: alpha +envoy.rbac.matchers.upstream_ip_port: + categories: + - envoy.rbac.matchers + security_posture: unknown + status: alpha diff --git a/source/extensions/filters/common/expr/BUILD b/source/extensions/filters/common/expr/BUILD index dc146b13b9a6c..cecb210da8a65 100644 --- a/source/extensions/filters/common/expr/BUILD +++ b/source/extensions/filters/common/expr/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( ":context_lib", "//source/common/http:utility_lib", "//source/common/protobuf", + "@com_google_cel_cpp//eval/public:activation", "@com_google_cel_cpp//eval/public:builtin_func_registrar", "@com_google_cel_cpp//eval/public:cel_expr_builder_factory", "@com_google_cel_cpp//eval/public:cel_expression", diff --git a/source/extensions/filters/common/expr/evaluator.h b/source/extensions/filters/common/expr/evaluator.h index 2e00d620f9e82..36926c218132a 100644 --- a/source/extensions/filters/common/expr/evaluator.h +++ b/source/extensions/filters/common/expr/evaluator.h @@ -6,6 +6,7 @@ #include "source/common/protobuf/protobuf.h" #include "source/extensions/filters/common/expr/context.h" +#include "eval/public/activation.h" #include "eval/public/cel_expression.h" #include "eval/public/cel_value.h" diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index 3d06846ed1123..861faac9b1bdc 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ 
b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -12,6 +12,7 @@ #include "envoy/tracing/http_tracer.h" #include "source/common/http/headers.h" +#include "source/common/http/utility.h" #include "source/common/singleton/const_singleton.h" namespace Envoy { @@ -78,9 +79,17 @@ struct Response { // (using "addCopy") to the response sent back to the downstream client on OK auth // responses. Http::HeaderVector response_headers_to_add; + // A set of HTTP headers returned by the authorization server, will be optionally set (using + // "setCopy") to the response sent back to the downstream client on OK auth responses. + Http::HeaderVector response_headers_to_set; // A set of HTTP headers consumed by the authorization server, will be removed // from the request to the upstream server. std::vector headers_to_remove; + // A set of query string parameters to be set (possibly overwritten) on the + // request to the upstream server. + Http::Utility::QueryParamsVector query_parameters_to_set; + // A set of query string parameters to remove from the request to the upstream server. + std::vector query_parameters_to_remove; // Optional http body used only on denied response. std::string body; // Optional http status used only on denied response. 
diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index d462a5179572b..eda59d1515008 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -57,23 +57,44 @@ void GrpcClientImpl::onSuccess(std::unique_ptrheaders_to_remove.push_back(Http::LowerCaseString(header)); } } + if (response->ok_response().query_parameters_to_set_size() > 0) { + for (const auto& query_parameter : response->ok_response().query_parameters_to_set()) { + authz_response->query_parameters_to_set.push_back( + std::pair(query_parameter.key(), query_parameter.value())); + } + } + if (response->ok_response().query_parameters_to_remove_size() > 0) { + for (const auto& key : response->ok_response().query_parameters_to_remove()) { + authz_response->query_parameters_to_remove.push_back(key); + } + } + // These two vectors hold header overrides of encoded response headers. if (response->ok_response().response_headers_to_add_size() > 0) { for (const auto& header : response->ok_response().response_headers_to_add()) { - authz_response->response_headers_to_add.emplace_back( - Http::LowerCaseString(header.header().key()), header.header().value()); + if (header.append().value()) { + authz_response->response_headers_to_add.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } else { + authz_response->response_headers_to_set.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } } } } } else { span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); authz_response->status = CheckStatus::Denied; + + // The default HTTP status code for denied response is 403 Forbidden. 
+ authz_response->status_code = Http::Code::Forbidden; if (response->has_denied_response()) { toAuthzResponseHeader(authz_response, response->denied_response().headers()); - authz_response->status_code = - static_cast(response->denied_response().status().code()); + + const uint32_t status_code = response->denied_response().status().code(); + if (status_code > 0) { + authz_response->status_code = static_cast(status_code); + } authz_response->body = response->denied_response().body(); - } else { - authz_response->status_code = Http::Code::Forbidden; } } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 4dba952fede7d..6ea634396894a 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -36,7 +36,10 @@ const Response& errorResponse() { Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, + Http::HeaderVector{}, {{}}, + Http::Utility::QueryParamsVector{}, + {}, EMPTY_STRING, Http::Code::Forbidden, ProtobufWkt::Struct{}}); @@ -46,9 +49,12 @@ const Response& errorResponse() { struct SuccessResponse { SuccessResponse(const Http::HeaderMap& headers, const MatcherSharedPtr& matchers, const MatcherSharedPtr& append_matchers, - const MatcherSharedPtr& response_matchers, Response&& response) + const MatcherSharedPtr& response_matchers, + const MatcherSharedPtr& dynamic_metadata_matchers, Response&& response) : headers_(headers), matchers_(matchers), append_matchers_(append_matchers), - response_matchers_(response_matchers), response_(std::make_unique(response)) { + response_matchers_(response_matchers), + to_dynamic_metadata_matchers_(dynamic_metadata_matchers), + response_(std::make_unique(response)) { headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { // UpstreamHeaderMatcher if (matchers_->matches(header.key().getStringView())) 
{ @@ -67,18 +73,27 @@ struct SuccessResponse { std::string(header.value().getStringView())); } if (response_matchers_->matches(header.key().getStringView())) { + // For HTTP implementation, the response headers from the auth server will, by default, be + // appended (using addCopy) to the encoded response headers. response_->response_headers_to_add.emplace_back( Http::LowerCaseString{std::string(header.key().getStringView())}, std::string(header.value().getStringView())); } + if (to_dynamic_metadata_matchers_->matches(header.key().getStringView())) { + const std::string key{header.key().getStringView()}; + const std::string value{header.value().getStringView()}; + (*response_->dynamic_metadata.mutable_fields())[key] = ValueUtil::stringValue(value); + } return Http::HeaderMap::Iterate::Continue; }); } const Http::HeaderMap& headers_; + // All matchers below are used on headers_. const MatcherSharedPtr& matchers_; const MatcherSharedPtr& append_matchers_; const MatcherSharedPtr& response_matchers_; + const MatcherSharedPtr& to_dynamic_metadata_matchers_; ResponsePtr response_; }; @@ -118,6 +133,8 @@ ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3 config.http_service().authorization_response().allowed_client_headers())), client_header_on_success_matchers_(toClientMatchersOnSuccess( config.http_service().authorization_response().allowed_client_headers_on_success())), + to_dynamic_metadata_matchers_(toDynamicMetadataMatchers( + config.http_service().authorization_response().dynamic_metadata_from_headers())), upstream_header_matchers_(toUpstreamMatchers( config.http_service().authorization_response().allowed_upstream_headers())), upstream_header_to_append_matchers_(toUpstreamMatchers( @@ -152,6 +169,12 @@ ClientConfig::toClientMatchersOnSuccess(const envoy::type::matcher::v3::ListStri return std::make_shared(std::move(matchers)); } +MatcherSharedPtr +ClientConfig::toDynamicMetadataMatchers(const 
envoy::type::matcher::v3::ListStringMatcher& list) { + std::vector matchers(createStringMatchers(list)); + return std::make_shared(std::move(matchers)); +} + MatcherSharedPtr ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { std::vector matchers(createStringMatchers(list)); @@ -324,25 +347,41 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { // Create an Ok authorization response. if (status_code == enumToInt(Http::Code::OK)) { - SuccessResponse ok{ - message->headers(), config_->upstreamHeaderMatchers(), - config_->upstreamHeaderToAppendMatchers(), config_->clientHeaderOnSuccessMatchers(), - Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, std::move(headers_to_remove), EMPTY_STRING, Http::Code::OK, - ProtobufWkt::Struct{}}}; + SuccessResponse ok{message->headers(), + config_->upstreamHeaderMatchers(), + config_->upstreamHeaderToAppendMatchers(), + config_->clientHeaderOnSuccessMatchers(), + config_->dynamicMetadataMatchers(), + Response{CheckStatus::OK, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + std::move(headers_to_remove), + Http::Utility::QueryParamsVector{}, + {}, + EMPTY_STRING, + Http::Code::OK, + ProtobufWkt::Struct{}}}; return std::move(ok.response_); } // Create a Denied authorization response. 
- SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(), + SuccessResponse denied{message->headers(), + config_->clientHeaderMatchers(), config_->upstreamHeaderToAppendMatchers(), config_->clientHeaderOnSuccessMatchers(), + config_->dynamicMetadataMatchers(), Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, + Http::HeaderVector{}, {{}}, + Http::Utility::QueryParamsVector{}, + {}, message->bodyAsString(), static_cast(status_code), ProtobufWkt::Struct{}}}; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index ab13c2b494fa8..a517bef9480de 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -99,6 +99,11 @@ class ClientConfig { return client_header_on_success_matchers_; } + /** + * Returns a list of matchers used for selecting the headers to emit as dynamic metadata. + */ + const MatcherSharedPtr& dynamicMetadataMatchers() const { return to_dynamic_metadata_matchers_; } + /** * Returns a list of matchers used for selecting the authorization response headers that * should be send to an the upstream server. 
@@ -131,11 +136,14 @@ class ClientConfig { static MatcherSharedPtr toClientMatchersOnSuccess(const envoy::type::matcher::v3::ListStringMatcher& list); static MatcherSharedPtr + toDynamicMetadataMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); + static MatcherSharedPtr toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); const MatcherSharedPtr request_header_matchers_; const MatcherSharedPtr client_header_matchers_; const MatcherSharedPtr client_header_on_success_matchers_; + const MatcherSharedPtr to_dynamic_metadata_matchers_; const MatcherSharedPtr upstream_header_matchers_; const MatcherSharedPtr upstream_header_to_append_matchers_; const std::string cluster_name_; diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index 54bdcabe33bc5..5271ce089f5c4 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -69,6 +69,25 @@ namespace Lua { lua_pushnumber(state, val); \ lua_settable(state, -3); +/** + * Get absl::string_view from Lua string. This checks if the argument at index is a string + * and build an absl::string_view from it. + * @param state the current Lua state. + * @param index the index of argument. + * @return absl::string_view of Lua string with proper string length. + **/ +inline absl::string_view getStringViewFromLuaString(lua_State* state, int index) { + size_t input_size = 0; + // When the argument at index in Lua state is not a string, for example, giving a table to + // logTrace (which uses this function under the hood), Lua script exits with an error like the + // following: "[string \"...\"]:3: bad argument #1 to 'logTrace' (string expected, got table)". + // However,`luaL_checklstring` accepts a number as its argument and implicitly converts it to a + // string, since Lua provides automatic conversion between string and number values at run time + // (https://www.lua.org/manual/5.1/manual.html#2.2.1). 
+ const char* input = luaL_checklstring(state, index, &input_size); + return absl::string_view(input, input_size); +} + /** * Calculate the maximum space needed to be aligned. */ diff --git a/source/extensions/filters/common/lua/wrappers.cc b/source/extensions/filters/common/lua/wrappers.cc index cb148e18bcfe2..bfb2a1c473259 100644 --- a/source/extensions/filters/common/lua/wrappers.cc +++ b/source/extensions/filters/common/lua/wrappers.cc @@ -42,7 +42,6 @@ int64_t timestampInSeconds(const absl::optional& system_time) { .count() : 0; } - } // namespace int BufferWrapper::luaLength(lua_State* state) { @@ -67,7 +66,7 @@ int BufferWrapper::luaGetBytes(lua_State* state) { int BufferWrapper::luaSetBytes(lua_State* state) { data_.drain(data_.length()); - absl::string_view bytes = luaL_checkstring(state, 2); + absl::string_view bytes = getStringViewFromLuaString(state, 2); data_.add(bytes); lua_pushnumber(state, data_.length()); return 1; diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index fbb8858dfd2fe..599442401f464 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -110,9 +110,11 @@ void GrpcClientImpl::onSuccess( callbacks_ = nullptr; } -void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&, +void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string& msg, Tracing::Span&) { ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok); + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::filter), debug, + "rate limit fail, status={} msg={}", status, msg); callbacks_->complete(LimitStatus::Error, nullptr, nullptr, nullptr, EMPTY_STRING, nullptr); callbacks_ = nullptr; } diff --git a/source/extensions/filters/common/rbac/BUILD b/source/extensions/filters/common/rbac/BUILD index 5d8e3712c0e2a..a49234a5111ac 100644 --- 
a/source/extensions/filters/common/rbac/BUILD +++ b/source/extensions/filters/common/rbac/BUILD @@ -22,13 +22,17 @@ envoy_cc_library( envoy_cc_library( name = "matchers_lib", srcs = ["matchers.cc"], - hdrs = ["matchers.h"], + hdrs = [ + "matcher_extension.h", + "matchers.h", + ], external_deps = ["abseil_optional"], deps = [ "//envoy/http:header_map_interface", "//envoy/network:connection_interface", "//source/common/common:assert_lib", "//source/common/common:matchers_lib", + "//source/common/config:utility_lib", "//source/common/http:header_utility_lib", "//source/common/network:cidr_range_lib", "//source/extensions/filters/common/expr:evaluator_lib", diff --git a/source/extensions/filters/common/rbac/engine_impl.cc b/source/extensions/filters/common/rbac/engine_impl.cc index dbac3dee1135a..eecc9d3412662 100644 --- a/source/extensions/filters/common/rbac/engine_impl.cc +++ b/source/extensions/filters/common/rbac/engine_impl.cc @@ -11,7 +11,8 @@ namespace Common { namespace RBAC { RoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl( - const envoy::config::rbac::v3::RBAC& rules, const EnforcementMode mode) + const envoy::config::rbac::v3::RBAC& rules, + ProtobufMessage::ValidationVisitor& validation_visitor, const EnforcementMode mode) : action_(rules.action()), mode_(mode) { // guard expression builder by presence of a condition in policies for (const auto& policy : rules.policies()) { @@ -22,7 +23,8 @@ RoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl( } for (const auto& policy : rules.policies()) { - policies_.emplace(policy.first, std::make_unique(policy.second, builder_.get())); + policies_.emplace(policy.first, std::make_unique(policy.second, builder_.get(), + validation_visitor)); } } diff --git a/source/extensions/filters/common/rbac/engine_impl.h b/source/extensions/filters/common/rbac/engine_impl.h index 237d4fd79868f..431763919f7ce 100644 --- a/source/extensions/filters/common/rbac/engine_impl.h +++ 
b/source/extensions/filters/common/rbac/engine_impl.h @@ -28,6 +28,7 @@ enum class EnforcementMode { Enforced, Shadow }; class RoleBasedAccessControlEngineImpl : public RoleBasedAccessControlEngine, NonCopyable { public: RoleBasedAccessControlEngineImpl(const envoy::config::rbac::v3::RBAC& rules, + ProtobufMessage::ValidationVisitor& validation_visitor, const EnforcementMode mode = EnforcementMode::Enforced); bool handleAction(const Network::Connection& connection, diff --git a/source/extensions/filters/common/rbac/matcher_extension.h b/source/extensions/filters/common/rbac/matcher_extension.h new file mode 100644 index 0000000000000..c4cd53b488a09 --- /dev/null +++ b/source/extensions/filters/common/rbac/matcher_extension.h @@ -0,0 +1,57 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/config/typed_config.h" +#include "envoy/protobuf/message_validator.h" + +#include "source/extensions/filters/common/rbac/matchers.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace RBAC { + +// Matcher extension factory for RBAC filter. Matchers could be extended to support IP address, +// header value etc. +class MatcherExtensionFactory : public Envoy::Config::TypedFactory { +public: + /** + * Function to create Matchers from the specified config. + * @param config supplies the matcher configuration + * @return a new MatcherExtension + */ + virtual MatcherConstSharedPtr create(const Protobuf::Message& config, + ProtobufMessage::ValidationVisitor& validation_visitor) PURE; + + // @brief the category of the matcher extension type for factory registration. + std::string category() const override { return "envoy.rbac.matchers"; } +}; + +// Base RBAC matcher extension factory. This facilitates easy creation of matcher extension +// factories. The factory is templated by: +// M: Matcher extension implementation +// P: Protobuf definition of the matcher. 
+template +class BaseMatcherExtensionFactory : public Filters::Common::RBAC::MatcherExtensionFactory { +public: + Filters::Common::RBAC::MatcherConstSharedPtr + create(const Protobuf::Message& config, + ProtobufMessage::ValidationVisitor& validation_visitor) override { + const auto& matcher_typed_config = + MessageUtil::downcastAndValidate( + config, validation_visitor); + + const auto proto_message = MessageUtil::anyConvert

(matcher_typed_config.typed_config()); + + return std::make_shared(proto_message); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique

(); } +}; + +} // namespace RBAC +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index f0efb5c7d668b..1cfa006ff38d9 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ b/source/extensions/filters/common/rbac/matchers.cc @@ -1,8 +1,10 @@ #include "source/extensions/filters/common/rbac/matchers.h" #include "envoy/config/rbac/v3/rbac.pb.h" +#include "envoy/upstream/upstream.h" -#include "source/common/common/assert.h" +#include "source/common/config/utility.h" +#include "source/extensions/filters/common/rbac/matcher_extension.h" namespace Envoy { namespace Extensions { @@ -10,12 +12,13 @@ namespace Filters { namespace Common { namespace RBAC { -MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Permission& permission) { +MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Permission& permission, + ProtobufMessage::ValidationVisitor& validation_visitor) { switch (permission.rule_case()) { case envoy::config::rbac::v3::Permission::RuleCase::kAndRules: - return std::make_shared(permission.and_rules()); + return std::make_shared(permission.and_rules(), validation_visitor); case envoy::config::rbac::v3::Permission::RuleCase::kOrRules: - return std::make_shared(permission.or_rules()); + return std::make_shared(permission.or_rules(), validation_visitor); case envoy::config::rbac::v3::Permission::RuleCase::kHeader: return std::make_shared(permission.header()); case envoy::config::rbac::v3::Permission::RuleCase::kDestinationIp: @@ -30,11 +33,16 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Permission& case envoy::config::rbac::v3::Permission::RuleCase::kMetadata: return std::make_shared(permission.metadata()); case envoy::config::rbac::v3::Permission::RuleCase::kNotRule: - return std::make_shared(permission.not_rule()); + return 
std::make_shared(permission.not_rule(), validation_visitor); case envoy::config::rbac::v3::Permission::RuleCase::kRequestedServerName: return std::make_shared(permission.requested_server_name()); case envoy::config::rbac::v3::Permission::RuleCase::kUrlPath: return std::make_shared(permission.url_path()); + case envoy::config::rbac::v3::Permission::RuleCase::kMatcher: { + auto& factory = + Config::Utility::getAndCheckFactory(permission.matcher()); + return factory.create(permission.matcher(), validation_visitor); + } default: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -72,9 +80,10 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Principal& } } -AndMatcher::AndMatcher(const envoy::config::rbac::v3::Permission::Set& set) { +AndMatcher::AndMatcher(const envoy::config::rbac::v3::Permission::Set& set, + ProtobufMessage::ValidationVisitor& validation_visitor) { for (const auto& rule : set.rules()) { - matchers_.push_back(Matcher::create(rule)); + matchers_.push_back(Matcher::create(rule, validation_visitor)); } } @@ -96,9 +105,10 @@ bool AndMatcher::matches(const Network::Connection& connection, return true; } -OrMatcher::OrMatcher(const Protobuf::RepeatedPtrField& rules) { +OrMatcher::OrMatcher(const Protobuf::RepeatedPtrField& rules, + ProtobufMessage::ValidationVisitor& validation_visitor) { for (const auto& rule : rules) { - matchers_.push_back(Matcher::create(rule)); + matchers_.push_back(Matcher::create(rule, validation_visitor)); } } diff --git a/source/extensions/filters/common/rbac/matchers.h b/source/extensions/filters/common/rbac/matchers.h index 5623dee2b70a9..a43dbce5c72cf 100644 --- a/source/extensions/filters/common/rbac/matchers.h +++ b/source/extensions/filters/common/rbac/matchers.h @@ -47,7 +47,8 @@ class Matcher { * Creates a shared instance of a matcher based off the rules defined in the Permission config * proto message. 
*/ - static MatcherConstSharedPtr create(const envoy::config::rbac::v3::Permission& permission); + static MatcherConstSharedPtr create(const envoy::config::rbac::v3::Permission& permission, + ProtobufMessage::ValidationVisitor& validation_visitor); /** * Creates a shared instance of a matcher based off the rules defined in the Principal config @@ -73,7 +74,8 @@ class AlwaysMatcher : public Matcher { */ class AndMatcher : public Matcher { public: - AndMatcher(const envoy::config::rbac::v3::Permission::Set& rules); + AndMatcher(const envoy::config::rbac::v3::Permission::Set& rules, + ProtobufMessage::ValidationVisitor& validation_visitor); AndMatcher(const envoy::config::rbac::v3::Principal::Set& ids); bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, @@ -89,9 +91,12 @@ class AndMatcher : public Matcher { */ class OrMatcher : public Matcher { public: - OrMatcher(const envoy::config::rbac::v3::Permission::Set& set) : OrMatcher(set.rules()) {} + OrMatcher(const envoy::config::rbac::v3::Permission::Set& set, + ProtobufMessage::ValidationVisitor& validation_visitor) + : OrMatcher(set.rules(), validation_visitor) {} OrMatcher(const envoy::config::rbac::v3::Principal::Set& set) : OrMatcher(set.ids()) {} - OrMatcher(const Protobuf::RepeatedPtrField& rules); + OrMatcher(const Protobuf::RepeatedPtrField& rules, + ProtobufMessage::ValidationVisitor& validation_visitor); OrMatcher(const Protobuf::RepeatedPtrField& ids); bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, @@ -103,8 +108,9 @@ class OrMatcher : public Matcher { class NotMatcher : public Matcher { public: - NotMatcher(const envoy::config::rbac::v3::Permission& permission) - : matcher_(Matcher::create(permission)) {} + NotMatcher(const envoy::config::rbac::v3::Permission& permission, + ProtobufMessage::ValidationVisitor& validation_visitor) + : matcher_(Matcher::create(permission, validation_visitor)) {} NotMatcher(const 
envoy::config::rbac::v3::Principal& principal) : matcher_(Matcher::create(principal)) {} @@ -203,8 +209,9 @@ class AuthenticatedMatcher : public Matcher { */ class PolicyMatcher : public Matcher, NonCopyable { public: - PolicyMatcher(const envoy::config::rbac::v3::Policy& policy, Expr::Builder* builder) - : permissions_(policy.permissions()), principals_(policy.principals()), + PolicyMatcher(const envoy::config::rbac::v3::Policy& policy, Expr::Builder* builder, + ProtobufMessage::ValidationVisitor& validation_visitor) + : permissions_(policy.permissions(), validation_visitor), principals_(policy.principals()), condition_(policy.condition()) { if (policy.has_condition()) { expr_ = Expr::createExpression(*builder, condition_); @@ -217,7 +224,6 @@ class PolicyMatcher : public Matcher, NonCopyable { private: const OrMatcher permissions_; const OrMatcher principals_; - const google::api::expr::v1alpha1::Expr condition_; Expr::ExpressionPtr expr_; }; diff --git a/source/extensions/filters/common/rbac/matchers/BUILD b/source/extensions/filters/common/rbac/matchers/BUILD new file mode 100644 index 0000000000000..3bfe2c93635f8 --- /dev/null +++ b/source/extensions/filters/common/rbac/matchers/BUILD @@ -0,0 +1,27 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "upstream_ip_port_lib", + srcs = [ + "upstream_ip_port.cc", + ], + hdrs = [ + "upstream_ip_port.h", + ], + deps = [ + "//source/common/common:logger_lib", + "//source/common/stream_info:upstream_address_lib", + "//source/extensions/filters/common/rbac:matchers_lib", + "//source/extensions/filters/common/rbac:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/rbac/matchers/upstream_ip_port/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/common/rbac/matchers/upstream_ip_port.cc 
b/source/extensions/filters/common/rbac/matchers/upstream_ip_port.cc new file mode 100644 index 0000000000000..11396b754fee5 --- /dev/null +++ b/source/extensions/filters/common/rbac/matchers/upstream_ip_port.cc @@ -0,0 +1,86 @@ +#include "source/extensions/filters/common/rbac/matchers/upstream_ip_port.h" + +#include "envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "source/common/stream_info/upstream_address.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace RBAC { +namespace Matchers { + +using namespace Filters::Common::RBAC; + +UpstreamIpPortMatcher::UpstreamIpPortMatcher( + const envoy::extensions::rbac::matchers::upstream_ip_port::v3::UpstreamIpPortMatcher& proto) { + if (!proto.has_upstream_ip() && !proto.has_upstream_port_range()) { + throw EnvoyException( + "Invalid UpstreamIpPortMatcher configuration - missing `upstream_ip` and/or" + " `upstream_port_range`"); + } + + if (proto.has_upstream_ip()) { + cidr_ = Network::Address::CidrRange::create(proto.upstream_ip()); + } + if (proto.has_upstream_port_range()) { + port_ = proto.upstream_port_range(); + } +} + +bool UpstreamIpPortMatcher::matches(const Network::Connection&, + const Envoy::Http::RequestHeaderMap&, + const StreamInfo::StreamInfo& info) const { + + if (!info.filterState().hasDataWithName(StreamInfo::UpstreamAddress::key())) { + ENVOY_LOG_EVERY_POW_2( + warn, + "Did not find filter state with key: {}. 
Do you have a filter in the filter chain " + "before the RBAC filter which populates the filter state with upstream addresses ?", + StreamInfo::UpstreamAddress::key()); + + return false; + } + + const StreamInfo::UpstreamAddress& address_obj = + info.filterState().getDataReadOnly( + StreamInfo::UpstreamAddress::key()); + + if (cidr_) { + if (cidr_->isInRange(*address_obj.address_)) { + ENVOY_LOG(debug, "UpstreamIpPort matcher for cidr range: {} evaluated to: true", + cidr_->asString()); + + } else { + ENVOY_LOG(debug, "UpstreamIpPort matcher for cidr range: {} evaluated to: false", + cidr_->asString()); + return false; + } + } + + if (port_) { + const auto port = address_obj.address_->ip()->port(); + if (port >= port_->start() && port <= port_->end()) { + ENVOY_LOG(debug, "UpstreamIpPort matcher for port range: {{}, {}} evaluated to: true", + port_->start(), port_->end()); + } else { + ENVOY_LOG(debug, "UpstreamIpPort matcher for port range: {{}, {}} evaluated to: false", + port_->start(), port_->end()); + return false; + } + } + + ENVOY_LOG(trace, "UpstreamIpPort matcher evaluated to: true"); + return true; +} + +REGISTER_FACTORY(UpstreamIpPortMatcherFactory, MatcherExtensionFactory); + +} // namespace Matchers +} // namespace RBAC +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/rbac/matchers/upstream_ip_port.h b/source/extensions/filters/common/rbac/matchers/upstream_ip_port.h new file mode 100644 index 0000000000000..3a01a8a7afab3 --- /dev/null +++ b/source/extensions/filters/common/rbac/matchers/upstream_ip_port.h @@ -0,0 +1,47 @@ +#pragma once + +#include "envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.pb.validate.h" + +#include "source/common/common/logger.h" +#include "source/common/network/cidr_range.h" +#include "source/extensions/filters/common/rbac/matcher_extension.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { 
+namespace Common { +namespace RBAC { +namespace Matchers { + +// RBAC matcher extension for matching upstream's IP address (and port range if configured). +// configuration with the resolved upstream IP (v4 and v6). +class UpstreamIpPortMatcher : public Filters::Common::RBAC::Matcher, + public Logger::Loggable { +public: + UpstreamIpPortMatcher( + const envoy::extensions::rbac::matchers::upstream_ip_port::v3::UpstreamIpPortMatcher& proto); + + // Matcher interface. + bool matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&, + const StreamInfo::StreamInfo&) const override; + +private: + absl::optional cidr_; + absl::optional port_; +}; + +// Extension factory for UpstreamIpPortMatcher. +class UpstreamIpPortMatcherFactory + : public Filters::Common::RBAC::BaseMatcherExtensionFactory< + UpstreamIpPortMatcher, + envoy::extensions::rbac::matchers::upstream_ip_port::v3::UpstreamIpPortMatcher> { +public: + std::string name() const override { return "envoy.rbac.matchers.upstream.upstream_ip_port"; } +}; + +} // namespace Matchers +} // namespace RBAC +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/rbac/utility.h b/source/extensions/filters/common/rbac/utility.h index f4acc822a36fb..b96205648d290 100644 --- a/source/extensions/filters/common/rbac/utility.h +++ b/source/extensions/filters/common/rbac/utility.h @@ -38,17 +38,21 @@ RoleBasedAccessControlFilterStats generateStats(const std::string& prefix, const std::string& shadow_prefix, Stats::Scope& scope); template -std::unique_ptr createEngine(const ConfigType& config) { +std::unique_ptr +createEngine(const ConfigType& config, ProtobufMessage::ValidationVisitor& validation_visitor) { return config.has_rules() ? 
std::make_unique( - config.rules(), EnforcementMode::Enforced) + config.rules(), validation_visitor, EnforcementMode::Enforced) : nullptr; } template -std::unique_ptr createShadowEngine(const ConfigType& config) { - return config.has_shadow_rules() ? std::make_unique( - config.shadow_rules(), EnforcementMode::Shadow) - : nullptr; +std::unique_ptr +createShadowEngine(const ConfigType& config, + ProtobufMessage::ValidationVisitor& validation_visitor) { + return config.has_shadow_rules() + ? std::make_unique( + config.shadow_rules(), validation_visitor, EnforcementMode::Shadow) + : nullptr; } std::string responseDetail(const std::string& policy_id); diff --git a/source/extensions/filters/http/adaptive_concurrency/config.cc b/source/extensions/filters/http/adaptive_concurrency/config.cc index 5abc39bf6fd97..ed2182bafcde4 100644 --- a/source/extensions/filters/http/adaptive_concurrency/config.cc +++ b/source/extensions/filters/http/adaptive_concurrency/config.cc @@ -25,7 +25,7 @@ Http::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromP auto gradient_controller_config = Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime()); controller = std::make_shared( - std::move(gradient_controller_config), context.dispatcher(), context.runtime(), + std::move(gradient_controller_config), context.mainThreadDispatcher(), context.runtime(), acc_stats_prefix + "gradient_controller.", context.scope(), context.api().randomGenerator(), context.timeSource()); diff --git a/source/extensions/filters/http/admission_control/BUILD b/source/extensions/filters/http/admission_control/BUILD index 5192e11d74f97..a687266ec291e 100644 --- a/source/extensions/filters/http/admission_control/BUILD +++ b/source/extensions/filters/http/admission_control/BUILD @@ -31,7 +31,7 @@ envoy_cc_library( "//source/common/runtime:runtime_lib", "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", 
"//source/extensions/filters/http/common:pass_through_filter_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -45,6 +45,6 @@ envoy_cc_extension( "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc index 3ceee04889eca..dd9a3579cd9ae 100644 --- a/source/extensions/filters/http/admission_control/admission_control.cc +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -6,7 +6,7 @@ #include #include "envoy/common/random_generator.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/runtime/runtime.h" diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h index 4d921eddbc108..22f9767683169 100644 --- a/source/extensions/filters/http/admission_control/admission_control.h +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -6,7 +6,7 @@ #include "envoy/common/random_generator.h" #include "envoy/common/time.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" #include 
"envoy/http/codes.h" #include "envoy/http/filter.h" #include "envoy/runtime/runtime.h" @@ -45,7 +45,7 @@ struct AdmissionControlStats { }; using AdmissionControlProto = - envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl; + envoy::extensions::filters::http::admission_control::v3::AdmissionControl; /** * Configuration for the admission control filter. diff --git a/source/extensions/filters/http/admission_control/config.cc b/source/extensions/filters/http/admission_control/config.cc index e28e0445b6435..1f08de2f00398 100644 --- a/source/extensions/filters/http/admission_control/config.cc +++ b/source/extensions/filters/http/admission_control/config.cc @@ -1,8 +1,8 @@ #include "source/extensions/filters/http/admission_control/config.h" #include "envoy/common/exception.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "envoy/registry/registry.h" #include "source/common/common/enum_to_int.h" @@ -18,7 +18,7 @@ namespace AdmissionControl { static constexpr std::chrono::seconds defaultSamplingWindow{30}; Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config, + const envoy::extensions::filters::http::admission_control::v3::AdmissionControl& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { if (config.has_sr_threshold() && config.sr_threshold().default_value().value() < 1.0) { diff --git a/source/extensions/filters/http/admission_control/config.h b/source/extensions/filters/http/admission_control/config.h index e25289f6e76be..ab8ecb79f5a19 100644 --- 
a/source/extensions/filters/http/admission_control/config.h +++ b/source/extensions/filters/http/admission_control/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -15,13 +15,12 @@ namespace AdmissionControl { */ class AdmissionControlFilterFactory : public Common::FactoryBase< - envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl> { + envoy::extensions::filters::http::admission_control::v3::AdmissionControl> { public: AdmissionControlFilterFactory() : FactoryBase("envoy.filters.http.admission_control") {} Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& - proto_config, + const envoy::extensions::filters::http::admission_control::v3::AdmissionControl& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/admission_control/evaluators/BUILD b/source/extensions/filters/http/admission_control/evaluators/BUILD index cddd0f2f0a43f..450138656e277 100644 --- a/source/extensions/filters/http/admission_control/evaluators/BUILD +++ b/source/extensions/filters/http/admission_control/evaluators/BUILD @@ -21,6 +21,6 @@ envoy_cc_library( deps = [ "//envoy/grpc:status", "//source/common/common:enum_to_int", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) diff --git 
a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h index 551975ead8818..f55c2fce24649 100644 --- a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h +++ b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/extensions/filters/http/admission_control/evaluators/response_evaluator.h" @@ -14,8 +14,8 @@ namespace AdmissionControl { class SuccessCriteriaEvaluator : public ResponseEvaluator { public: - using SuccessCriteria = envoy::extensions::filters::http::admission_control::v3alpha:: - AdmissionControl::SuccessCriteria; + using SuccessCriteria = + envoy::extensions::filters::http::admission_control::v3::AdmissionControl::SuccessCriteria; SuccessCriteriaEvaluator(const SuccessCriteria& evaluation_criteria); // ResponseEvaluator bool isHttpSuccess(uint64_t code) const override; diff --git a/source/extensions/filters/http/alternate_protocols_cache/config.cc b/source/extensions/filters/http/alternate_protocols_cache/config.cc index 61327b29b1d92..147658c79a140 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/config.cc +++ b/source/extensions/filters/http/alternate_protocols_cache/config.cc @@ -16,11 +16,14 @@ Http::FilterFactoryCb AlternateProtocolsCacheFilterFactory::createFilterFactoryF proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Http::AlternateProtocolsCacheManagerFactoryImpl 
alternate_protocol_cache_manager_factory( - context.singletonManager(), context.dispatcher().timeSource(), context.threadLocal()); - FilterConfigSharedPtr filter_config(std::make_shared( - proto_config, alternate_protocol_cache_manager_factory, context.dispatcher().timeSource())); + context.singletonManager(), context.threadLocal(), {context}); + FilterConfigSharedPtr filter_config( + std::make_shared(proto_config, alternate_protocol_cache_manager_factory, + context.mainThreadDispatcher().timeSource())); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamEncoderFilter(std::make_shared(filter_config)); + callbacks.addStreamEncoderFilter( + std::make_shared(filter_config, callbacks.dispatcher())); }; } diff --git a/source/extensions/filters/http/alternate_protocols_cache/filter.cc b/source/extensions/filters/http/alternate_protocols_cache/filter.cc index 75cc99c9fb325..2d87b6fbef7a1 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/filter.cc +++ b/source/extensions/filters/http/alternate_protocols_cache/filter.cc @@ -5,10 +5,10 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.pb.h" +#include "source/common/http/alternate_protocols_cache_impl.h" +#include "source/common/http/alternate_protocols_cache_manager_impl.h" #include "source/common/http/headers.h" -#include "quiche/spdy/core/spdy_alt_svc_wire_format.h" - namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -24,17 +24,18 @@ FilterConfig::FilterConfig( : alternate_protocol_cache_manager_(alternate_protocol_cache_manager_factory.get()), proto_config_(proto_config), time_source_(time_source) {} -Http::AlternateProtocolsCacheSharedPtr FilterConfig::getAlternateProtocolCache() { +Http::AlternateProtocolsCacheSharedPtr +FilterConfig::getAlternateProtocolCache(Event::Dispatcher& dispatcher) { return 
proto_config_.has_alternate_protocols_cache_options() ? alternate_protocol_cache_manager_->getCache( - proto_config_.alternate_protocols_cache_options()) + proto_config_.alternate_protocols_cache_options(), dispatcher) : nullptr; } void Filter::onDestroy() {} -Filter::Filter(const FilterConfigSharedPtr& config) - : cache_(config->getAlternateProtocolCache()), time_source_(config->timeSource()) {} +Filter::Filter(const FilterConfigSharedPtr& config, Event::Dispatcher& dispatcher) + : cache_(config->getAlternateProtocolCache(dispatcher)), time_source_(config->timeSource()) {} Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { if (!cache_) { @@ -44,23 +45,21 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers if (alt_svc.empty()) { return Http::FilterHeadersStatus::Continue; } + std::vector protocols; for (size_t i = 0; i < alt_svc.size(); ++i) { - spdy::SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector; - if (!spdy::SpdyAltSvcWireFormat::ParseHeaderFieldValue(alt_svc[i]->value().getStringView(), - &altsvc_vector)) { + absl::optional> + potential_protocols = Http::AlternateProtocolsCacheImpl::protocolsFromString( + alt_svc[i]->value().getStringView(), time_source_); + if (!potential_protocols.has_value()) { ENVOY_LOG(trace, "Invalid Alt-Svc header received: '{}'", alt_svc[i]->value().getStringView()); return Http::FilterHeadersStatus::Continue; } - for (const auto& alt_svc : altsvc_vector) { - MonotonicTime expiration = - time_source_.monotonicTime() + std::chrono::seconds(alt_svc.max_age); - Http::AlternateProtocolsCache::AlternateProtocol protocol(alt_svc.protocol_id, alt_svc.host, - alt_svc.port, expiration); - protocols.push_back(protocol); - } + protocols.insert(protocols.end(), std::make_move_iterator(potential_protocols.value().begin()), + std::make_move_iterator(potential_protocols.value().end())); } + // The upstream host is used here, instead of the :authority request header 
because // Envoy routes request to upstream hosts not to origin servers directly. This choice would // allow HTTP/3 to be used on a per-upstream host basis, even for origins which are load diff --git a/source/extensions/filters/http/alternate_protocols_cache/filter.h b/source/extensions/filters/http/alternate_protocols_cache/filter.h index a77d30f672a1f..3135afe4bd9b9 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/filter.h +++ b/source/extensions/filters/http/alternate_protocols_cache/filter.h @@ -23,7 +23,7 @@ class FilterConfig { TimeSource& time_source); // Returns the alternate protocols cache for the current thread. - Http::AlternateProtocolsCacheSharedPtr getAlternateProtocolCache(); + Http::AlternateProtocolsCacheSharedPtr getAlternateProtocolCache(Event::Dispatcher& dispatcher); TimeSource& timeSource() { return time_source_; } @@ -39,9 +39,10 @@ using FilterConfigSharedPtr = std::shared_ptr; * Alternate protocol cache filter which parses the alt-svc response header and updates * the cache accordingly. 
*/ -class Filter : public Http::PassThroughEncoderFilter, Logger::Loggable { +class Filter : public Http::PassThroughEncoderFilter, + Logger::Loggable { public: - explicit Filter(const FilterConfigSharedPtr& config); + Filter(const FilterConfigSharedPtr& config, Event::Dispatcher& thread_local_dispatcher); // Http::PassThroughEncoderFilter Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& header, diff --git a/source/extensions/filters/http/aws_lambda/config.cc b/source/extensions/filters/http/aws_lambda/config.cc index a538f07847ddd..a3b1838ff1949 100644 --- a/source/extensions/filters/http/aws_lambda/config.cc +++ b/source/extensions/filters/http/aws_lambda/config.cc @@ -47,7 +47,8 @@ Http::FilterFactoryCb AwsLambdaFilterFactory::createFilterFactoryFromProtoTyped( } const std::string region = arn->region(); auto signer = std::make_shared( - service_name, region, std::move(credentials_provider), context.dispatcher().timeSource()); + service_name, region, std::move(credentials_provider), + context.mainThreadDispatcher().timeSource()); FilterSettings filter_settings{*arn, getInvocationMode(proto_config), proto_config.payload_passthrough()}; diff --git a/source/extensions/filters/http/aws_request_signing/config.cc b/source/extensions/filters/http/aws_request_signing/config.cc index 02408ec90a091..0d2e9bc97c1f4 100644 --- a/source/extensions/filters/http/aws_request_signing/config.cc +++ b/source/extensions/filters/http/aws_request_signing/config.cc @@ -23,7 +23,7 @@ Http::FilterFactoryCb AwsRequestSigningFilterFactory::createFilterFactoryFromPro context.api(), Extensions::Common::Aws::Utility::metadataFetcher); auto signer = std::make_unique( config.service_name(), config.region(), credentials_provider, - context.dispatcher().timeSource()); + context.mainThreadDispatcher().timeSource()); auto filter_config = std::make_shared(std::move(signer), stats_prefix, context.scope(), diff --git a/source/extensions/filters/http/bandwidth_limit/BUILD 
b/source/extensions/filters/http/bandwidth_limit/BUILD index 3f3b5e3f72735..b2c1f4ccc49a6 100644 --- a/source/extensions/filters/http/bandwidth_limit/BUILD +++ b/source/extensions/filters/http/bandwidth_limit/BUILD @@ -28,7 +28,7 @@ envoy_cc_library( "//source/common/runtime:runtime_lib", "//source/common/stats:timespan_lib", "//source/extensions/filters/http/common:stream_rate_limiter_lib", - "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3:pkg_cc_proto", ], ) @@ -41,6 +41,6 @@ envoy_cc_extension( "//envoy/http:filter_interface", "//source/common/protobuf:utility_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc index 6d831d6de8903..7da4185786293 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc @@ -8,7 +8,7 @@ #include "source/common/http/utility.h" #include "source/common/stats/timespan_impl.h" -using envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit; +using envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit; using Envoy::Extensions::HttpFilters::Common::StreamRateLimiter; namespace Envoy { diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h index f5bac46426425..876ef673e39f8 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h @@ -5,7 +5,7 @@ #include #include -#include 
"envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.h" #include "envoy/http/filter.h" #include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" @@ -55,12 +55,11 @@ struct BandwidthLimitStats { class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { public: using EnableMode = - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit_EnableMode; + envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit_EnableMode; - FilterConfig( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& config, - Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, - bool per_route = false); + FilterConfig(const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& config, + Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, + bool per_route = false); ~FilterConfig() override = default; Runtime::Loader& runtime() { return runtime_; } BandwidthLimitStats& stats() const { return stats_; } diff --git a/source/extensions/filters/http/bandwidth_limit/config.cc b/source/extensions/filters/http/bandwidth_limit/config.cc index 95b62c134175a..40f59d1da2252 100644 --- a/source/extensions/filters/http/bandwidth_limit/config.cc +++ b/source/extensions/filters/http/bandwidth_limit/config.cc @@ -13,7 +13,7 @@ namespace HttpFilters { namespace BandwidthLimitFilter { Http::FilterFactoryCb BandwidthLimitFilterConfig::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { FilterConfigSharedPtr filter_config = std::make_shared( proto_config, context.scope(), context.runtime(), context.timeSource()); @@ -24,7 +24,7 @@ 
Http::FilterFactoryCb BandwidthLimitFilterConfig::createFilterFactoryFromProtoTy Router::RouteSpecificFilterConfigConstSharedPtr BandwidthLimitFilterConfig::createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { return std::make_shared(proto_config, context.scope(), context.runtime(), context.timeSource(), true); diff --git a/source/extensions/filters/http/bandwidth_limit/config.h b/source/extensions/filters/http/bandwidth_limit/config.h index b29a3ac2320b2..e167d3136e2a5 100644 --- a/source/extensions/filters/http/bandwidth_limit/config.h +++ b/source/extensions/filters/http/bandwidth_limit/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.h" -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.validate.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -15,19 +15,17 @@ namespace BandwidthLimitFilter { */ class BandwidthLimitFilterConfig : public Common::FactoryBase< - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit> { + envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit> { public: BandwidthLimitFilterConfig() : FactoryBase("envoy.filters.http.bandwidth_limit") {} private: Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& - proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, const std::string& stats_prefix, 
Server::Configuration::FactoryContext& context) override; Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& - proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override; }; diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 53219e59f3891..5c31e78ff7cfc 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -28,7 +28,7 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:utility_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) @@ -77,7 +77,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) @@ -96,7 +96,7 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/protobuf", "@com_google_absl//absl/container:btree", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) @@ -116,6 +116,6 @@ envoy_cc_extension( deps = [ ":cache_filter_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 
768fd0ae5b99e..da0b86eb05eec 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -29,9 +29,9 @@ struct CacheResponseCodeDetailValues { using CacheResponseCodeDetails = ConstSingleton; -CacheFilter::CacheFilter( - const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, const std::string&, - Stats::Scope&, TimeSource& time_source, HttpCache& http_cache) +CacheFilter::CacheFilter(const envoy::extensions::filters::http::cache::v3::CacheConfig& config, + const std::string&, Stats::Scope&, TimeSource& time_source, + HttpCache& http_cache) : time_source_(time_source), cache_(http_cache), vary_allow_list_(config.allowed_vary_headers()) {} diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 418a770b9e2ca..77e098a99d134 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -5,7 +5,7 @@ #include #include -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" #include "source/common/common/logger.h" #include "source/extensions/filters/http/cache/cache_headers_utils.h" @@ -24,7 +24,7 @@ class CacheFilter : public Http::PassThroughFilter, public Logger::Loggable, public std::enable_shared_from_this { public: - CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, + CacheFilter(const envoy::extensions::filters::http::cache::v3::CacheConfig& config, const std::string& stats_prefix, Stats::Scope& scope, TimeSource& time_source, HttpCache& http_cache); // Http::StreamFilterBase diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h index 06737d7a2b2c5..c5a219f0fedd8 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.h +++ 
b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -1,7 +1,7 @@ #pragma once #include "envoy/common/time.h" -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" #include "envoy/http/header_map.h" #include "source/common/common/matchers.h" diff --git a/source/extensions/filters/http/cache/config.cc b/source/extensions/filters/http/cache/config.cc index f1d713e8d1c19..33f719e477784 100644 --- a/source/extensions/filters/http/cache/config.cc +++ b/source/extensions/filters/http/cache/config.cc @@ -8,7 +8,7 @@ namespace HttpFilters { namespace Cache { Http::FilterFactoryCb CacheFilterFactory::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, + const envoy::extensions::filters::http::cache::v3::CacheConfig& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { const std::string type{TypeUtil::typeUrlToDescriptorFullName(config.typed_config().type_url())}; HttpCacheFactory* const http_cache_factory = diff --git a/source/extensions/filters/http/cache/config.h b/source/extensions/filters/http/cache/config.h index 341a054344d52..2f87062b86a4e 100644 --- a/source/extensions/filters/http/cache/config.h +++ b/source/extensions/filters/http/cache/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.validate.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -11,13 +11,13 @@ namespace HttpFilters { namespace Cache { class CacheFilterFactory - : public Common::FactoryBase { + : public Common::FactoryBase { public: CacheFilterFactory() : FactoryBase("envoy.filters.http.cache") {} private: Http::FilterFactoryCb 
createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, + const envoy::extensions::filters::http::cache::v3::CacheConfig& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 7438646649dd3..47ab926e02bd2 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -7,7 +7,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/time.h" #include "envoy/config/typed_config.h" -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" #include "envoy/http/header_map.h" #include "source/common/common/assert.h" @@ -369,7 +369,7 @@ class HttpCacheFactory : public Config::TypedFactory { // Returns an HttpCache that will remain valid indefinitely (at least as long // as the calling CacheFilter). 
virtual HttpCache& - getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config) PURE; + getCache(const envoy::extensions::filters::http::cache::v3::CacheConfig& config) PURE; ~HttpCacheFactory() override = default; private: diff --git a/source/extensions/filters/http/cache/simple_http_cache/BUILD b/source/extensions/filters/http/cache/simple_http_cache/BUILD index 5b25659f64b0f..f218cb4552d3d 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/source/extensions/filters/http/cache/simple_http_cache/BUILD @@ -24,6 +24,6 @@ envoy_cc_extension( "//source/common/protobuf", "//source/extensions/filters/http/cache:http_cache_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "@envoy_api//envoy/extensions/cache/simple_http_cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/cache/simple_http_cache/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc index dc2947c457d93..0564e854754f1 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc @@ -1,6 +1,6 @@ #include "source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h" -#include "envoy/extensions/cache/simple_http_cache/v3alpha/config.pb.h" +#include "envoy/extensions/cache/simple_http_cache/v3/config.pb.h" #include "envoy/registry/registry.h" #include "source/common/buffer/buffer_impl.h" @@ -109,6 +109,24 @@ LookupContextPtr SimpleHttpCache::makeLookupContext(LookupRequest&& request) { return std::make_unique(*this, std::move(request)); } +const absl::flat_hash_set SimpleHttpCache::headersNotToUpdate() { + CONSTRUCT_ON_FIRST_USE( + absl::flat_hash_set, + // Content range should not be changed upon validation + Http::Headers::get().ContentRange, + + // Headers that 
describe the body content should never be updated. + Http::Headers::get().ContentLength, + + // It does not make sense for this level of the code to be updating the ETag, when + // presumably the cached_response_headers reflect this specific ETag. + Http::CustomHeaders::get().Etag, + + // We don't update the cached response on a Vary; we just delete it + // entirely. So don't bother copying over the Vary header. + Http::CustomHeaders::get().Vary); +} + void SimpleHttpCache::updateHeaders(const LookupContext& lookup_context, const Http::ResponseHeaderMap& response_headers, const ResponseMetadata& metadata) { @@ -127,19 +145,34 @@ void SimpleHttpCache::updateHeaders(const LookupContext& lookup_context, return; } - // https://www.rfc-editor.org/rfc/pdfrfc/rfc7234.txt.pdf - // 4.3.4 Freshening Stored Responses upon Validation - // use other header fields provided in the 304 (Not Modified) - // response to replace all instances of the corresponding header - // fields in the stored response. - // // Assumptions: // 1. The internet is fast, i.e. we get the result as soon as the server sends it. - // Race conditions would not be possible because we are always processing up-to-date data. + // Race conditions would not be possible because we are always processing up-to-date data. // 2. No key collision for etag. Therefore, if etag matches it's the same resource. // 3. Backend is correct. 
etag is being used as a unique identifier to the resource - // TODO(tangsaidi) merge the header map instead of replacing it according to rfc7234 - entry.response_headers_ = Http::createHeaderMap(response_headers); + + // use other header fields provided in the new response to replace all instances + // of the corresponding header fields in the stored response + + // `updatedHeaderFields` makes sure each field is only removed when we update the header + // field for the first time to handle the case where incoming headers have repeated values + absl::flat_hash_set updatedHeaderFields; + response_headers.iterate( + [&entry, &updatedHeaderFields]( + const Http::HeaderEntry& incoming_response_header) -> Http::HeaderMap::Iterate { + Http::LowerCaseString lower_case_key{incoming_response_header.key().getStringView()}; + absl::string_view incoming_value{incoming_response_header.value().getStringView()}; + if (headersNotToUpdate().contains(lower_case_key)) { + return Http::HeaderMap::Iterate::Continue; + } + if (!updatedHeaderFields.contains(lower_case_key)) { + entry.response_headers_->setCopy(lower_case_key, incoming_value); + updatedHeaderFields.insert(lower_case_key); + } else { + entry.response_headers_->addCopy(lower_case_key, incoming_value); + } + return Http::HeaderMap::Iterate::Continue; + }); entry.metadata_ = metadata; } @@ -258,11 +291,10 @@ class SimpleHttpCacheFactory : public HttpCacheFactory { // From TypedFactory ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique< - envoy::extensions::cache::simple_http_cache::v3alpha::SimpleHttpCacheConfig>(); + envoy::extensions::cache::simple_http_cache::v3::SimpleHttpCacheConfig>(); } // From HttpCacheFactory - HttpCache& - getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&) override { + HttpCache& getCache(const envoy::extensions::filters::http::cache::v3::CacheConfig&) override { return cache_; } diff --git 
a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h index 91d0e85dc5e78..883143436d7b4 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h @@ -8,7 +8,7 @@ #include "absl/synchronization/mutex.h" // included to make code_format happy -#include "envoy/extensions/cache/simple_http_cache/v3alpha/config.pb.h" +#include "envoy/extensions/cache/simple_http_cache/v3/config.pb.h" namespace Envoy { namespace Extensions { @@ -28,6 +28,12 @@ class SimpleHttpCache : public HttpCache { Entry varyLookup(const LookupRequest& request, const Http::ResponseHeaderMapPtr& response_headers); + // A list of headers that we do not want to update upon validation + // We skip these headers because either it's updated by other application logic + // or they are fall into categories defined in the IETF doc below + // https://www.ietf.org/archive/id/draft-ietf-httpbis-cache-18.html s3.2 + static const absl::flat_hash_set headersNotToUpdate(); + public: // HttpCache LookupContextPtr makeLookupContext(LookupRequest&& request) override; diff --git a/source/extensions/filters/http/cdn_loop/BUILD b/source/extensions/filters/http/cdn_loop/BUILD index 1d7d680bf80bb..6ba442abac790 100644 --- a/source/extensions/filters/http/cdn_loop/BUILD +++ b/source/extensions/filters/http/cdn_loop/BUILD @@ -53,6 +53,6 @@ envoy_cc_extension( "//envoy/server:factory_context_interface", "//source/common/common:statusor_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cdn_loop/config.cc b/source/extensions/filters/http/cdn_loop/config.cc index 93c3d155a8ad8..2b910a29a89fb 100644 --- 
a/source/extensions/filters/http/cdn_loop/config.cc +++ b/source/extensions/filters/http/cdn_loop/config.cc @@ -3,7 +3,7 @@ #include #include "envoy/common/exception.h" -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" #include "envoy/http/filter.h" #include "envoy/registry/registry.h" #include "envoy/server/factory_context.h" @@ -22,7 +22,7 @@ using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParseContext; using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParsedCdnId; Http::FilterFactoryCb CdnLoopFilterFactory::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config, + const envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig& config, const std::string& /*stats_prefix*/, Server::Configuration::FactoryContext& /*context*/) { StatusOr context = parseCdnId(ParseContext(config.cdn_id())); if (!context.ok() || !context->context().atEnd()) { diff --git a/source/extensions/filters/http/cdn_loop/config.h b/source/extensions/filters/http/cdn_loop/config.h index 5d9fea5bd1fe9..15b6f7fa4a476 100644 --- a/source/extensions/filters/http/cdn_loop/config.h +++ b/source/extensions/filters/http/cdn_loop/config.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.validate.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.validate.h" #include "envoy/http/filter.h" #include "envoy/server/factory_context.h" @@ -15,14 +15,13 @@ namespace HttpFilters { namespace CdnLoop { class CdnLoopFilterFactory - : public Common::FactoryBase< - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig> { + : public Common::FactoryBase { public: CdnLoopFilterFactory() : FactoryBase("envoy.filters.http.cdn_loop") {} 
private: Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config, + const envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/common/jwks_fetcher.cc b/source/extensions/filters/http/common/jwks_fetcher.cc index 4b2e50594fd13..70cdbbc4c77bf 100644 --- a/source/extensions/filters/http/common/jwks_fetcher.cc +++ b/source/extensions/filters/http/common/jwks_fetcher.cc @@ -1,5 +1,6 @@ #include "source/extensions/filters/http/common/jwks_fetcher.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/http_uri.pb.h" #include "source/common/common/enum_to_int.h" @@ -16,60 +17,6 @@ namespace Extensions { namespace HttpFilters { namespace Common { namespace { -// Parameters of the jittered backoff strategy. -static constexpr uint32_t RetryInitialDelayMilliseconds = 1000; -static constexpr uint32_t RetryMaxDelayMilliseconds = 10 * 1000; - -/** - * @details construct an envoy.config.route.v3.RetryPolicy protobuf message - * from a less feature rich envoy.config.core.v3.RetryPolicy one. - * - * this is about limiting the user's possibilities. - * just doing truncated exponential backoff - * - * the upstream.use_retry feature flag will need to be turned on (default) - * for this to work. - * - * @param retry policy from the RemoteJwks proto - * @return a retry policy usable by the http async client. 
- */ -envoy::config::route::v3::RetryPolicy -adaptRetryPolicy(const envoy::config::core::v3::RetryPolicy& core_retry_policy) { - envoy::config::route::v3::RetryPolicy route_retry_policy; - - uint64_t base_interval_ms = RetryInitialDelayMilliseconds; - uint64_t max_interval_ms = RetryMaxDelayMilliseconds; - - if (core_retry_policy.has_retry_back_off()) { - const auto& core_back_off = core_retry_policy.retry_back_off(); - - base_interval_ms = PROTOBUF_GET_MS_REQUIRED(core_back_off, base_interval); - - max_interval_ms = - PROTOBUF_GET_MS_OR_DEFAULT(core_back_off, max_interval, base_interval_ms * 10); - - if (max_interval_ms < base_interval_ms) { - throw EnvoyException("max_interval must be greater than or equal to the base_interval"); - } - } - - route_retry_policy.mutable_num_retries()->set_value( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(core_retry_policy, num_retries, 1)); - - auto* route_mutable_back_off = route_retry_policy.mutable_retry_back_off(); - - route_mutable_back_off->mutable_base_interval()->CopyFrom( - Protobuf::util::TimeUtil::MillisecondsToDuration(base_interval_ms)); - route_mutable_back_off->mutable_max_interval()->CopyFrom( - Protobuf::util::TimeUtil::MillisecondsToDuration(max_interval_ms)); - - // set all the other fields with appropriate values. 
- route_retry_policy.set_retry_on("5xx,gateway-error,connect-failure,reset"); - route_retry_policy.mutable_per_try_timeout()->CopyFrom( - route_retry_policy.retry_back_off().max_interval()); - - return route_retry_policy; -} class JwksFetcherImpl : public JwksFetcher, public Logger::Loggable, @@ -78,10 +25,6 @@ class JwksFetcherImpl : public JwksFetcher, JwksFetcherImpl(Upstream::ClusterManager& cm, const RemoteJwks& remote_jwks) : cm_(cm), remote_jwks_(remote_jwks) { ENVOY_LOG(trace, "{}", __func__); - - if (remote_jwks_.has_retry_policy()) { - route_retry_policy_ = adaptRetryPolicy(remote_jwks_.retry_policy()); - } } ~JwksFetcherImpl() override { cancel(); } @@ -122,7 +65,10 @@ class JwksFetcherImpl : public JwksFetcher, .setChildSpanName("JWT Remote PubKey Fetch"); if (remote_jwks_.has_retry_policy()) { - options.setRetryPolicy(route_retry_policy_.value()); + envoy::config::route::v3::RetryPolicy route_retry_policy = + Http::Utility::convertCoreToRouteRetryPolicy(remote_jwks_.retry_policy(), + "5xx,gateway-error,connect-failure,reset"); + options.setRetryPolicy(route_retry_policy); options.setBufferBodyForRetry(true); } @@ -178,11 +124,6 @@ class JwksFetcherImpl : public JwksFetcher, const RemoteJwks& remote_jwks_; Http::AsyncClient::Request* request_{}; - // http async client uses richer semantics than the ones allowed in RemoteJwks - // envoy.config.route.v3.RetryPolicy vs envoy.config.core.v3.RetryPolicy - // mapping is done in constructor and reused. 
- absl::optional route_retry_policy_{absl::nullopt}; - void reset() { request_ = nullptr; receiver_ = nullptr; diff --git a/source/extensions/filters/http/composite/config.cc b/source/extensions/filters/http/composite/config.cc index 4d399813a264e..5702472fa0162 100644 --- a/source/extensions/filters/http/composite/config.cc +++ b/source/extensions/filters/http/composite/config.cc @@ -19,7 +19,7 @@ Http::FilterFactoryCb CompositeFilterFactory::createFilterFactoryFromProtoTyped( ALL_COMPOSITE_FILTER_STATS(POOL_COUNTER_PREFIX(factory_context.scope(), prefix))}); return [stats](Http::FilterChainFactoryCallbacks& callbacks) -> void { - auto filter = std::make_shared(*stats); + auto filter = std::make_shared(*stats, callbacks.dispatcher()); callbacks.addStreamFilter(filter); callbacks.addAccessLogHandler(filter); }; diff --git a/source/extensions/filters/http/composite/factory_wrapper.h b/source/extensions/filters/http/composite/factory_wrapper.h index c436f52165e2f..ec8d98e8a6335 100644 --- a/source/extensions/filters/http/composite/factory_wrapper.h +++ b/source/extensions/filters/http/composite/factory_wrapper.h @@ -14,7 +14,8 @@ class Filter; // the lifetime of this wrapper by appending them to the errors_ field. This should be checked // afterwards to determine whether invalid callbacks were called. 
struct FactoryCallbacksWrapper : public Http::FilterChainFactoryCallbacks { - explicit FactoryCallbacksWrapper(Filter& filter) : filter_(filter) {} + FactoryCallbacksWrapper(Filter& filter, Event::Dispatcher& dispatcher) + : filter_(filter), dispatcher_(dispatcher) {} void addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr filter) override; void addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr, @@ -26,8 +27,10 @@ struct FactoryCallbacksWrapper : public Http::FilterChainFactoryCallbacks { void addStreamFilter(Http::StreamFilterSharedPtr, Matcher::MatchTreeSharedPtr) override; void addAccessLogHandler(AccessLog::InstanceSharedPtr) override; + Event::Dispatcher& dispatcher() override { return dispatcher_; } Filter& filter_; + Event::Dispatcher& dispatcher_; using FilterAlternative = absl::variant(); - FactoryCallbacksWrapper wrapper(*this); + FactoryCallbacksWrapper wrapper(*this, dispatcher_); composite_action.createFilters(wrapper); if (!wrapper.errors_.empty()) { diff --git a/source/extensions/filters/http/composite/filter.h b/source/extensions/filters/http/composite/filter.h index 2df7b4bc952ad..a225653a0e6f5 100644 --- a/source/extensions/filters/http/composite/filter.h +++ b/source/extensions/filters/http/composite/filter.h @@ -29,7 +29,8 @@ class Filter : public Http::StreamFilter, public AccessLog::Instance, Logger::Loggable { public: - explicit Filter(FilterStats& stats) : decoded_headers_(false), stats_(stats) {} + Filter(FilterStats& stats, Event::Dispatcher& dispatcher) + : dispatcher_(dispatcher), decoded_headers_(false), stats_(stats) {} // Http::StreamDecoderFilter Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, @@ -78,6 +79,7 @@ class Filter : public Http::StreamFilter, private: friend FactoryCallbacksWrapper; + Event::Dispatcher& dispatcher_; // Use these to track whether we are allowed to insert a specific kind of filter. 
These mainly // serve to surface an easier to understand error, as attempting to insert a filter at a later // time will result in various FM assertions firing. diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index ef0a6fb5c67ac..1bc118a341f8b 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( deps = [ "//envoy/http:filter_interface", "//source/common/http:header_utility_lib", + "//source/common/stream_info:upstream_address_lib", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", "//source/extensions/filters/http/common:pass_through_filter_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/dynamic_forward_proxy/config.cc b/source/extensions/filters/http/dynamic_forward_proxy/config.cc index 3f58fa19ca06a..75b69d19876d5 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/config.cc @@ -15,8 +15,7 @@ Http::FilterFactoryCb DynamicForwardProxyFilterFactory::createFilterFactoryFromP const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), context.api(), - context.runtime(), context.scope(), context.messageValidationVisitor()); + context); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc 
b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc index 9c90194f265ca..a25f157ea9eae 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc @@ -5,6 +5,7 @@ #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" #include "source/common/http/utility.h" +#include "source/common/stream_info/upstream_address.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache.h" namespace Envoy { @@ -29,7 +30,8 @@ ProxyFilterConfig::ProxyFilterConfig( Upstream::ClusterManager& cluster_manager) : dns_cache_manager_(cache_manager_factory.get()), dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())), - cluster_manager_(cluster_manager) {} + cluster_manager_(cluster_manager), + save_upstream_address_(proto_config.save_upstream_address()) {} ProxyPerRouteConfig::ProxyPerRouteConfig( const envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig& config) @@ -64,6 +66,8 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea return Http::FilterHeadersStatus::Continue; } if (cluster_type->name() != "envoy.clusters.dynamic_forward_proxy") { + ENVOY_STREAM_LOG(debug, "cluster_type->name(): {} ", *this->decoder_callbacks_, + cluster_type->name()); return Http::FilterHeadersStatus::Continue; } @@ -119,10 +123,17 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea } switch (result.status_) { - case LoadDnsCacheEntryStatus::InCache: + case LoadDnsCacheEntryStatus::InCache: { ASSERT(cache_load_handle_ == nullptr); ENVOY_STREAM_LOG(debug, "DNS cache entry already loaded, continuing", *decoder_callbacks_); + + auto const& host = config_->cache().getHost(headers.Host()->value().getStringView()); + if (host.has_value()) { + addHostAddressToFilterState(host.value()->address()); + } + return Http::FilterHeadersStatus::Continue; + } 
case LoadDnsCacheEntryStatus::Loading: ASSERT(cache_load_handle_ != nullptr); ENVOY_STREAM_LOG(debug, "waiting to load DNS cache entry", *decoder_callbacks_); @@ -138,10 +149,45 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea NOT_REACHED_GCOVR_EXCL_LINE; } -void ProxyFilter::onLoadDnsCacheComplete(const Common::DynamicForwardProxy::DnsHostInfoSharedPtr&) { - ENVOY_STREAM_LOG(debug, "load DNS cache complete, continuing", *decoder_callbacks_); +void ProxyFilter::addHostAddressToFilterState( + const Network::Address::InstanceConstSharedPtr& address) { + + if (!config_->saveUpstreamAddress()) { + return; + } + + // `onLoadDnsCacheComplete` is called by DNS cache on first resolution even if there was a + // resolution failure (null address). This check makes sure that we do not add null address to + // FilterState when this happens. + if (!address) { + ENVOY_STREAM_LOG(debug, "Cannot add address to filter state: invalid address", + *decoder_callbacks_); + return; + } + + ENVOY_STREAM_LOG(trace, "Adding resolved host {} to filter state", *decoder_callbacks_, + address->asString()); + + const Envoy::StreamInfo::FilterStateSharedPtr& filter_state = + decoder_callbacks_->streamInfo().filterState(); + + auto address_obj = std::make_unique(); + address_obj->address_ = address; + + filter_state->setData(StreamInfo::UpstreamAddress::key(), std::move(address_obj), + StreamInfo::FilterState::StateType::Mutable, + StreamInfo::FilterState::LifeSpan::Request); +} + +void ProxyFilter::onLoadDnsCacheComplete( + const Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info) { + ENVOY_STREAM_LOG(debug, "load DNS cache complete, continuing after adding resolved host: {}", + *decoder_callbacks_, host_info->resolvedHost()); ASSERT(circuit_breaker_ != nullptr); circuit_breaker_.reset(); + + addHostAddressToFilterState(host_info->address()); + decoder_callbacks_->continueDecoding(); } diff --git 
a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h index d97e3efeed54c..a0920e553c704 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h @@ -20,11 +20,13 @@ class ProxyFilterConfig { Extensions::Common::DynamicForwardProxy::DnsCache& cache() { return *dns_cache_; } Upstream::ClusterManager& clusterManager() { return cluster_manager_; } + bool saveUpstreamAddress() const { return save_upstream_address_; }; private: const Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr dns_cache_manager_; const Extensions::Common::DynamicForwardProxy::DnsCacheSharedPtr dns_cache_; Upstream::ClusterManager& cluster_manager_; + const bool save_upstream_address_; }; using ProxyFilterConfigSharedPtr = std::shared_ptr; @@ -59,6 +61,8 @@ class ProxyFilter const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr&) override; private: + void addHostAddressToFilterState(const Network::Address::InstanceConstSharedPtr& address); + const ProxyFilterConfigSharedPtr config_; Upstream::ClusterInfoConstSharedPtr cluster_info_; Upstream::ResourceAutoIncDecPtr circuit_breaker_; diff --git a/source/extensions/filters/http/dynamo/config.cc b/source/extensions/filters/http/dynamo/config.cc index 0850dc9b70836..eaf33d18e73f2 100644 --- a/source/extensions/filters/http/dynamo/config.cc +++ b/source/extensions/filters/http/dynamo/config.cc @@ -19,7 +19,7 @@ Http::FilterFactoryCb DynamoFilterConfig::createFilterFactoryFromProtoTyped( auto stats = std::make_shared(context.scope(), stats_prefix); return [&context, stats](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( - context.runtime(), stats, context.dispatcher().timeSource())); + context.runtime(), stats, context.mainThreadDispatcher().timeSource())); }; } diff --git 
a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 29a6ca8be8437..6722acaaaa469 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -160,7 +160,8 @@ Http::FilterHeadersStatus Filter::encode100ContinueHeaders(Http::ResponseHeaderM Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { ENVOY_STREAM_LOG(trace, - "ext_authz filter has {} response header(s) to add to the encoded response:", + "ext_authz filter has {} response header(s) to add and {} response header(s) to " + "set to the encoded response:", *encoder_callbacks_, response_headers_to_add_.size()); if (!response_headers_to_add_.empty()) { ENVOY_STREAM_LOG( @@ -171,6 +172,14 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers } } + if (!response_headers_to_set_.empty()) { + ENVOY_STREAM_LOG( + trace, "ext_authz filter set header(s) to the encoded response:", *encoder_callbacks_); + for (const auto& header : response_headers_to_set_) { + ENVOY_STREAM_LOG(trace, "'{}':'{}'", *encoder_callbacks_, header.first.get(), header.second); + headers.setCopy(header.first, header.second); + } + } return Http::FilterHeadersStatus::Continue; } @@ -213,12 +222,13 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { switch (response->status) { case CheckStatus::OK: { - // Any changes to request headers can affect how the request is going to be + // Any changes to request headers or query parameters can affect how the request is going to be // routed. If we are changing the headers we also need to clear the route // cache. 
if (config_->clearRouteCache() && (!response->headers_to_set.empty() || !response->headers_to_append.empty() || - !response->headers_to_remove.empty())) { + !response->headers_to_remove.empty() || !response->query_parameters_to_set.empty() || + !response->query_parameters_to_remove.empty())) { ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *decoder_callbacks_); decoder_callbacks_->clearRouteCache(); } @@ -271,6 +281,48 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { response_headers_to_add_ = std::move(response->response_headers_to_add); } + if (!response->response_headers_to_set.empty()) { + ENVOY_STREAM_LOG(trace, "ext_authz filter saving {} header(s) to set to the response:", + *decoder_callbacks_, response->response_headers_to_set.size()); + response_headers_to_set_ = std::move(response->response_headers_to_set); + } + + absl::optional modified_query_parameters; + if (!response->query_parameters_to_set.empty()) { + modified_query_parameters = + Http::Utility::parseQueryString(request_headers_->Path()->value().getStringView()); + ENVOY_STREAM_LOG( + trace, "ext_authz filter set query parameter(s) on the request:", *decoder_callbacks_); + for (const auto& [key, value] : response->query_parameters_to_set) { + ENVOY_STREAM_LOG(trace, "'{}={}'", *decoder_callbacks_, key, value); + (*modified_query_parameters)[key] = value; + } + } + + if (!response->query_parameters_to_remove.empty()) { + if (!modified_query_parameters) { + modified_query_parameters = + Http::Utility::parseQueryString(request_headers_->Path()->value().getStringView()); + } + ENVOY_STREAM_LOG(trace, "ext_authz filter removed query parameter(s) from the request:", + *decoder_callbacks_); + for (const auto& key : response->query_parameters_to_remove) { + ENVOY_STREAM_LOG(trace, "'{}'", *decoder_callbacks_, key); + (*modified_query_parameters).erase(key); + } + } + + // We modified the query parameters in some way, so regenerate the `path` header and set it 
+ // here. + if (modified_query_parameters) { + const auto new_path = Http::Utility::replaceQueryString(request_headers_->Path()->value(), + modified_query_parameters.value()); + ENVOY_STREAM_LOG( + trace, "ext_authz filter modified query parameter(s), using new path for request: {}", + *decoder_callbacks_, new_path); + request_headers_->setPath(new_path); + } + if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_ok_); } diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 32563b627203b..cd75def13e928 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -299,6 +299,7 @@ class Filter : public Logger::Loggable, Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; Http::RequestHeaderMap* request_headers_; Http::HeaderVector response_headers_to_add_{}; + Http::HeaderVector response_headers_to_set_{}; State state_{State::NotStarted}; FilterReturn filter_return_{FilterReturn::ContinueDecoding}; Upstream::ClusterInfoConstSharedPtr cluster_; diff --git a/source/extensions/filters/http/ext_proc/BUILD b/source/extensions/filters/http/ext_proc/BUILD index e49813400fb6a..209cc344ded3b 100644 --- a/source/extensions/filters/http/ext_proc/BUILD +++ b/source/extensions/filters/http/ext_proc/BUILD @@ -29,8 +29,8 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", "@com_google_absl//absl/strings:str_format", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -42,7 +42,7 @@ envoy_cc_extension( ":client_lib", ":ext_proc", "//source/extensions/filters/http/common:factory_base_lib", - 
"@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", ], ) @@ -51,7 +51,8 @@ envoy_cc_library( hdrs = ["client.h"], deps = [ "//envoy/grpc:status", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "//envoy/stream_info:stream_info_interface", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -64,7 +65,7 @@ envoy_cc_library( "//envoy/http:header_map_interface", "//source/common/http:header_utility_lib", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -79,6 +80,6 @@ envoy_cc_library( "//envoy/upstream:cluster_manager_interface", "//source/common/grpc:typed_async_client_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/ext_proc/client.h b/source/extensions/filters/http/ext_proc/client.h index 59df9985e14fc..9d66549086543 100644 --- a/source/extensions/filters/http/ext_proc/client.h +++ b/source/extensions/filters/http/ext_proc/client.h @@ -4,7 +4,8 @@ #include "envoy/common/pure.h" #include "envoy/grpc/status.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" +#include "envoy/stream_info/stream_info.h" namespace Envoy { namespace Extensions { @@ -14,7 +15,7 @@ namespace ExternalProcessing { class ExternalProcessorStream { public: virtual ~ExternalProcessorStream() = default; - virtual void send(envoy::service::ext_proc::v3alpha::ProcessingRequest&& request, + virtual void send(envoy::service::ext_proc::v3::ProcessingRequest&& request, bool end_stream) PURE; // Idempotent close. Return true if it actually closed. 
virtual bool close() PURE; @@ -26,7 +27,7 @@ class ExternalProcessorCallbacks { public: virtual ~ExternalProcessorCallbacks() = default; virtual void onReceiveMessage( - std::unique_ptr&& response) PURE; + std::unique_ptr&& response) PURE; virtual void onGrpcError(Grpc::Status::GrpcStatus error) PURE; virtual void onGrpcClose() PURE; }; @@ -34,7 +35,8 @@ class ExternalProcessorCallbacks { class ExternalProcessorClient { public: virtual ~ExternalProcessorClient() = default; - virtual ExternalProcessorStreamPtr start(ExternalProcessorCallbacks& callbacks) PURE; + virtual ExternalProcessorStreamPtr start(ExternalProcessorCallbacks& callbacks, + const StreamInfo::StreamInfo& stream_info) PURE; }; using ExternalProcessorClientPtr = std::unique_ptr; diff --git a/source/extensions/filters/http/ext_proc/client_impl.cc b/source/extensions/filters/http/ext_proc/client_impl.cc index d8834c8a827a4..79b1ebf944113 100644 --- a/source/extensions/filters/http/ext_proc/client_impl.cc +++ b/source/extensions/filters/http/ext_proc/client_impl.cc @@ -5,42 +5,46 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -static constexpr char kExternalMethod[] = - "envoy.service.ext_proc.v3alpha.ExternalProcessor.Process"; +static constexpr char kExternalMethod[] = "envoy.service.ext_proc.v3.ExternalProcessor.Process"; ExternalProcessorClientImpl::ExternalProcessorClientImpl( Grpc::AsyncClientManager& client_manager, - const envoy::config::core::v3::GrpcService& grpc_service, Stats::Scope& scope) { - factory_ = client_manager.factoryForGrpcService(grpc_service, scope, true); -} + const envoy::config::core::v3::GrpcService& grpc_service, Stats::Scope& scope) + : client_manager_(client_manager), grpc_service_(grpc_service), scope_(scope) {} ExternalProcessorStreamPtr -ExternalProcessorClientImpl::start(ExternalProcessorCallbacks& callbacks) { +ExternalProcessorClientImpl::start(ExternalProcessorCallbacks& callbacks, + const StreamInfo::StreamInfo& stream_info) { 
Grpc::AsyncClient grpcClient( - factory_->createUncachedRawAsyncClient()); - return std::make_unique(std::move(grpcClient), callbacks); + client_manager_.getOrCreateRawAsyncClient(grpc_service_, scope_, true, + Grpc::CacheOption::AlwaysCache)); + return std::make_unique(std::move(grpcClient), callbacks, + stream_info); } ExternalProcessorStreamImpl::ExternalProcessorStreamImpl( Grpc::AsyncClient&& client, - ExternalProcessorCallbacks& callbacks) + ExternalProcessorCallbacks& callbacks, const StreamInfo::StreamInfo& stream_info) : callbacks_(callbacks) { client_ = std::move(client); auto descriptor = Protobuf::DescriptorPool::generated_pool()->FindMethodByName(kExternalMethod); + grpc_context_.stream_info = &stream_info; Http::AsyncClient::StreamOptions options; + options.setParentContext(grpc_context_); stream_ = client_.start(*descriptor, *this, options); } -void ExternalProcessorStreamImpl::send( - envoy::service::ext_proc::v3alpha::ProcessingRequest&& request, bool end_stream) { +void ExternalProcessorStreamImpl::send(envoy::service::ext_proc::v3::ProcessingRequest&& request, + bool end_stream) { stream_.sendMessage(std::move(request), end_stream); } bool ExternalProcessorStreamImpl::close() { if (!stream_closed_) { ENVOY_LOG(debug, "Closing gRPC stream"); - stream_->closeStream(); + stream_.closeStream(); stream_closed_ = true; + stream_.resetStream(); return true; } return false; diff --git a/source/extensions/filters/http/ext_proc/client_impl.h b/source/extensions/filters/http/ext_proc/client_impl.h index fdce71ed1a173..d1381e5093a38 100644 --- a/source/extensions/filters/http/ext_proc/client_impl.h +++ b/source/extensions/filters/http/ext_proc/client_impl.h @@ -5,14 +5,14 @@ #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/grpc/async_client_manager.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/stats/scope.h" #include 
"source/common/grpc/typed_async_client.h" #include "source/extensions/filters/http/ext_proc/client.h" -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; namespace Envoy { namespace Extensions { @@ -27,10 +27,13 @@ class ExternalProcessorClientImpl : public ExternalProcessorClient { const envoy::config::core::v3::GrpcService& grpc_service, Stats::Scope& scope); - ExternalProcessorStreamPtr start(ExternalProcessorCallbacks& callbacks) override; + ExternalProcessorStreamPtr start(ExternalProcessorCallbacks& callbacks, + const StreamInfo::StreamInfo& stream_info) override; private: - Grpc::AsyncClientFactoryPtr factory_; + Grpc::AsyncClientManager& client_manager_; + const envoy::config::core::v3::GrpcService grpc_service_; + Stats::Scope& scope_; }; class ExternalProcessorStreamImpl : public ExternalProcessorStream, @@ -38,7 +41,8 @@ class ExternalProcessorStreamImpl : public ExternalProcessorStream, public Logger::Loggable { public: ExternalProcessorStreamImpl(Grpc::AsyncClient&& client, - ExternalProcessorCallbacks& callbacks); + ExternalProcessorCallbacks& callbacks, + const StreamInfo::StreamInfo& stream_info); void send(ProcessingRequest&& request, bool end_stream) override; // Close the stream. This is idempotent and will return true if we // actually closed it. 
@@ -57,6 +61,7 @@ class ExternalProcessorStreamImpl : public ExternalProcessorStream, ExternalProcessorCallbacks& callbacks_; Grpc::AsyncClient client_; Grpc::AsyncStream stream_; + Http::AsyncClient::ParentContext grpc_context_; bool stream_closed_ = false; }; diff --git a/source/extensions/filters/http/ext_proc/config.cc b/source/extensions/filters/http/ext_proc/config.cc index 1e8ec8e321c4d..607126d5f93ec 100644 --- a/source/extensions/filters/http/ext_proc/config.cc +++ b/source/extensions/filters/http/ext_proc/config.cc @@ -9,7 +9,7 @@ namespace HttpFilters { namespace ExternalProcessing { Http::FilterFactoryCb ExternalProcessingFilterConfig::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { const uint32_t message_timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config, message_timeout, DefaultMessageTimeoutMs); @@ -28,7 +28,7 @@ Http::FilterFactoryCb ExternalProcessingFilterConfig::createFilterFactoryFromPro Router::RouteSpecificFilterConfigConstSharedPtr ExternalProcessingFilterConfig::createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute& proto_config, Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) { return std::make_shared(proto_config); } diff --git a/source/extensions/filters/http/ext_proc/config.h b/source/extensions/filters/http/ext_proc/config.h index 9918f341c402a..bf47ede88fed0 100644 --- a/source/extensions/filters/http/ext_proc/config.h +++ b/source/extensions/filters/http/ext_proc/config.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include 
"envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.validate.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -13,9 +13,8 @@ namespace HttpFilters { namespace ExternalProcessing { class ExternalProcessingFilterConfig - : public Common::FactoryBase< - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor, - envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute> { + : public Common::FactoryBase { public: ExternalProcessingFilterConfig() : FactoryBase("envoy.filters.http.ext_proc") {} @@ -24,11 +23,11 @@ class ExternalProcessingFilterConfig static constexpr uint64_t DefaultMessageTimeoutMs = 200; Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute& proto_config, Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor& validator) override; }; diff --git a/source/extensions/filters/http/ext_proc/ext_proc.cc b/source/extensions/filters/http/ext_proc/ext_proc.cc index e04a8cf857014..1eca01fa4ab76 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.cc +++ b/source/extensions/filters/http/ext_proc/ext_proc.cc @@ -10,12 +10,12 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute; -using 
envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Http::FilterDataStatus; using Http::FilterHeadersStatus; @@ -55,7 +55,7 @@ Filter::StreamOpenState Filter::openStream() { ENVOY_BUG(!processing_complete_, "openStream should not have been called"); if (!stream_) { ENVOY_LOG(debug, "Opening gRPC stream to external processor"); - stream_ = client_->start(*this); + stream_ = client_->start(*this, decoder_callbacks_->streamInfo()); stats_.streams_started_.inc(); if (processing_complete_) { // Stream failed while starting and either onGrpcError or onGrpcClose was already called @@ -65,18 +65,26 @@ Filter::StreamOpenState Filter::openStream() { return StreamOpenState::Ok; } -void Filter::onDestroy() { - ENVOY_LOG(trace, "onDestroy"); - // Make doubly-sure we no longer use the stream, as - // per the filter contract. - processing_complete_ = true; +void Filter::closeStream() { if (stream_) { + ENVOY_LOG(debug, "Calling close on stream"); if (stream_->close()) { stats_.streams_closed_.inc(); } + stream_.reset(); + } else { + ENVOY_LOG(debug, "Stream already closed"); } } +void Filter::onDestroy() { + ENVOY_LOG(debug, "onDestroy"); + // Make doubly-sure we no longer use the stream, as + // per the filter contract. 
+ processing_complete_ = true; + closeStream(); +} + FilterHeadersStatus Filter::onHeaders(ProcessorState& state, Http::RequestOrResponseHeaderMap& headers, bool end_stream) { switch (openStream()) { @@ -478,7 +486,9 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { case ProcessingResponse::ResponseCase::kImmediateResponse: // We won't be sending anything more to the stream after we // receive this message. + ENVOY_LOG(debug, "Sending immediate response"); processing_complete_ = true; + closeStream(); cleanUpTimers(); sendImmediateResponse(response->immediate_response()); message_handled = true; @@ -499,6 +509,7 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { // to protect us from a malformed server. ENVOY_LOG(warn, "Spurious response message {} received on gRPC stream", response->response_case()); + closeStream(); clearAsyncState(); processing_complete_ = true; } @@ -519,6 +530,7 @@ void Filter::onGrpcError(Grpc::Status::GrpcStatus status) { } else { processing_complete_ = true; + closeStream(); // Since the stream failed, there is no need to handle timeouts, so // make sure that they do not fire now. cleanUpTimers(); @@ -535,6 +547,7 @@ void Filter::onGrpcClose() { stats_.streams_closed_.inc(); // Successful close. We can ignore the stream for the rest of our request // and response processing. + closeStream(); clearAsyncState(); } @@ -547,12 +560,14 @@ void Filter::onMessageTimeout() { // and we can't wait any more. So, as we do for a spurious message, ignore // the external processor for the rest of the request. processing_complete_ = true; + closeStream(); stats_.failure_mode_allowed_.inc(); clearAsyncState(); } else { // Return an error and stop processing the current stream. 
processing_complete_ = true; + closeStream(); decoding_state_.setCallbackState(ProcessorState::CallbackState::Idle); encoding_state_.setCallbackState(ProcessorState::CallbackState::Idle); ImmediateResponse errorResponse; diff --git a/source/extensions/filters/http/ext_proc/ext_proc.h b/source/extensions/filters/http/ext_proc/ext_proc.h index ca6c87060dd82..f831e69ee7ce6 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.h +++ b/source/extensions/filters/http/ext_proc/ext_proc.h @@ -5,10 +5,10 @@ #include #include "envoy/event/timer.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" #include "envoy/grpc/async_client.h" #include "envoy/http/filter.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -38,7 +38,7 @@ struct ExtProcFilterStats { class FilterConfig { public: - FilterConfig(const envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor& config, + FilterConfig(const envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor& config, const std::chrono::milliseconds message_timeout, Stats::Scope& scope, const std::string& stats_prefix) : failure_mode_allow_(config.failure_mode_allow()), message_timeout_(message_timeout), @@ -51,8 +51,7 @@ class FilterConfig { const ExtProcFilterStats& stats() const { return stats_; } - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& - processingMode() const { + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& processingMode() const { return processing_mode_; } @@ -67,7 +66,7 @@ class FilterConfig { const std::chrono::milliseconds message_timeout_; ExtProcFilterStats stats_; - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode processing_mode_; + const 
envoy::extensions::filters::http::ext_proc::v3::ProcessingMode processing_mode_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -75,20 +74,19 @@ using FilterConfigSharedPtr = std::shared_ptr; class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { public: explicit FilterConfigPerRoute( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute& config); + const envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute& config); void merge(const FilterConfigPerRoute& other); bool disabled() const { return disabled_; } - const absl::optional& + const absl::optional& processingMode() const { return processing_mode_; } private: bool disabled_; - absl::optional - processing_mode_; + absl::optional processing_mode_; }; class Filter : public Logger::Loggable, @@ -129,7 +127,7 @@ class Filter : public Logger::Loggable, // ExternalProcessorCallbacks void onReceiveMessage( - std::unique_ptr&& response) override; + std::unique_ptr&& response) override; void onGrpcError(Grpc::Status::GrpcStatus error) override; @@ -149,10 +147,11 @@ class Filter : public Logger::Loggable, private: void mergePerRouteConfig(); StreamOpenState openStream(); + void closeStream(); void cleanUpTimers(); void clearAsyncState(); - void sendImmediateResponse(const envoy::service::ext_proc::v3alpha::ImmediateResponse& response); + void sendImmediateResponse(const envoy::service::ext_proc::v3::ImmediateResponse& response); Http::FilterHeadersStatus onHeaders(ProcessorState& state, Http::RequestOrResponseHeaderMap& headers, bool end_stream); @@ -182,7 +181,7 @@ class Filter : public Logger::Loggable, }; extern std::string responseCaseToString( - const envoy::service::ext_proc::v3alpha::ProcessingResponse::ResponseCase response_case); + const envoy::service::ext_proc::v3::ProcessingResponse::ResponseCase response_case); } // namespace ExternalProcessing } // namespace HttpFilters diff --git a/source/extensions/filters/http/ext_proc/mutation_utils.cc 
b/source/extensions/filters/http/ext_proc/mutation_utils.cc index 1079716b8a7c0..b4d87f536e442 100644 --- a/source/extensions/filters/http/ext_proc/mutation_utils.cc +++ b/source/extensions/filters/http/ext_proc/mutation_utils.cc @@ -15,11 +15,11 @@ using Http::Headers; using Http::LowerCaseString; using envoy::config::core::v3::HeaderValueOption; -using envoy::service::ext_proc::v3alpha::BodyMutation; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeaderMutation; -using envoy::service::ext_proc::v3alpha::HeadersResponse; +using envoy::service::ext_proc::v3::BodyMutation; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeaderMutation; +using envoy::service::ext_proc::v3::HeadersResponse; void MutationUtils::headersToProto(const Http::HeaderMap& headers_in, envoy::config::core::v3::HeaderMap& proto_out) { diff --git a/source/extensions/filters/http/ext_proc/mutation_utils.h b/source/extensions/filters/http/ext_proc/mutation_utils.h index f57c13793d8b5..2776c807bfa10 100644 --- a/source/extensions/filters/http/ext_proc/mutation_utils.h +++ b/source/extensions/filters/http/ext_proc/mutation_utils.h @@ -2,7 +2,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/http/header_map.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/common/common/logger.h" @@ -19,22 +19,21 @@ class MutationUtils : public Logger::Loggable { // Apply mutations that are common to header responses. 
static void - applyCommonHeaderResponse(const envoy::service::ext_proc::v3alpha::HeadersResponse& response, + applyCommonHeaderResponse(const envoy::service::ext_proc::v3::HeadersResponse& response, Http::HeaderMap& headers); // Modify header map based on a set of mutations from a protobuf - static void - applyHeaderMutations(const envoy::service::ext_proc::v3alpha::HeaderMutation& mutation, - Http::HeaderMap& headers, bool replacing_message); + static void applyHeaderMutations(const envoy::service::ext_proc::v3::HeaderMutation& mutation, + Http::HeaderMap& headers, bool replacing_message); // Apply mutations that are common to body responses. // Mutations will be applied to the header map if it is not null. - static void applyCommonBodyResponse(const envoy::service::ext_proc::v3alpha::BodyResponse& body, + static void applyCommonBodyResponse(const envoy::service::ext_proc::v3::BodyResponse& body, Http::RequestOrResponseHeaderMap* headers, Buffer::Instance& buffer); // Modify a buffer based on a set of mutations from a protobuf - static void applyBodyMutations(const envoy::service::ext_proc::v3alpha::BodyMutation& mutation, + static void applyBodyMutations(const envoy::service::ext_proc::v3::BodyMutation& mutation, Buffer::Instance& buffer); // Determine if a particular HTTP status code is valid. 
diff --git a/source/extensions/filters/http/ext_proc/processor_state.cc b/source/extensions/filters/http/ext_proc/processor_state.cc index 8cc1bb21de791..0448c475e28b5 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.cc +++ b/source/extensions/filters/http/ext_proc/processor_state.cc @@ -10,13 +10,13 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeadersResponse; -using envoy::service::ext_proc::v3alpha::TrailersResponse; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeadersResponse; +using envoy::service::ext_proc::v3::TrailersResponse; void ProcessorState::startMessageTimer(Event::TimerCb cb, std::chrono::milliseconds timeout) { if (!message_timer_) { diff --git a/source/extensions/filters/http/ext_proc/processor_state.h b/source/extensions/filters/http/ext_proc/processor_state.h index d3f74746e0bd4..6c6a07bfead30 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.h +++ b/source/extensions/filters/http/ext_proc/processor_state.h @@ -5,10 +5,10 @@ #include "envoy/buffer/buffer.h" #include "envoy/event/timer.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/processing_mode.pb.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include 
"envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" @@ -96,11 +96,10 @@ class ProcessorState : public Logger::Loggable { bool partialBodyProcessed() const { return partial_body_processed_; } virtual void setProcessingMode( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) PURE; + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) PURE; bool sendHeaders() const { return send_headers_; } bool sendTrailers() const { return send_trailers_; } - envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode - bodyMode() const { + envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode bodyMode() const { return body_mode_; } @@ -114,9 +113,9 @@ class ProcessorState : public Logger::Loggable { virtual void requestWatermark() PURE; virtual void clearWatermark() PURE; - bool handleHeadersResponse(const envoy::service::ext_proc::v3alpha::HeadersResponse& response); - bool handleBodyResponse(const envoy::service::ext_proc::v3alpha::BodyResponse& response); - bool handleTrailersResponse(const envoy::service::ext_proc::v3alpha::TrailersResponse& response); + bool handleHeadersResponse(const envoy::service::ext_proc::v3::HeadersResponse& response); + bool handleBodyResponse(const envoy::service::ext_proc::v3::BodyResponse& response); + bool handleTrailersResponse(const envoy::service::ext_proc::v3::TrailersResponse& response); virtual const Buffer::Instance* bufferedData() const PURE; bool hasBufferedData() const { return bufferedData() != nullptr && bufferedData()->length() > 0; } @@ -144,16 +143,16 @@ class ProcessorState : public Logger::Loggable { void continueIfNecessary(); void clearAsyncState(); - virtual envoy::service::ext_proc::v3alpha::HttpHeaders* - mutableHeaders(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; - virtual 
envoy::service::ext_proc::v3alpha::HttpBody* - mutableBody(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; - virtual envoy::service::ext_proc::v3alpha::HttpTrailers* - mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; + virtual envoy::service::ext_proc::v3::HttpHeaders* + mutableHeaders(envoy::service::ext_proc::v3::ProcessingRequest& request) const PURE; + virtual envoy::service::ext_proc::v3::HttpBody* + mutableBody(envoy::service::ext_proc::v3::ProcessingRequest& request) const PURE; + virtual envoy::service::ext_proc::v3::HttpTrailers* + mutableTrailers(envoy::service::ext_proc::v3::ProcessingRequest& request) const PURE; protected: void setBodyMode( - envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode body_mode); + envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode body_mode); Filter& filter_; Http::StreamFilterCallbacks* filter_callbacks_; @@ -183,7 +182,7 @@ class ProcessorState : public Logger::Loggable { bool send_trailers_ : 1; // The specific mode for body handling - envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode body_mode_; + envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode body_mode_; Http::RequestOrResponseHeaderMap* headers_ = nullptr; Http::HeaderMap* trailers_ = nullptr; @@ -194,8 +193,7 @@ class ProcessorState : public Logger::Loggable { class DecodingProcessorState : public ProcessorState { public: explicit DecodingProcessorState( - Filter& filter, - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) + Filter& filter, const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) : ProcessorState(filter) { setProcessingModeInternal(mode); } @@ -232,23 +230,23 @@ class DecodingProcessorState : public ProcessorState { void continueProcessing() const override { decoder_callbacks_->continueDecoding(); } - 
envoy::service::ext_proc::v3alpha::HttpHeaders* - mutableHeaders(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpHeaders* + mutableHeaders(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_request_headers(); } - envoy::service::ext_proc::v3alpha::HttpBody* - mutableBody(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpBody* + mutableBody(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_request_body(); } - envoy::service::ext_proc::v3alpha::HttpTrailers* - mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpTrailers* + mutableTrailers(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_request_trailers(); } void setProcessingMode( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) override { + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) override { setProcessingModeInternal(mode); } @@ -257,7 +255,7 @@ class DecodingProcessorState : public ProcessorState { private: void setProcessingModeInternal( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode); + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode); Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; }; @@ -265,8 +263,7 @@ class DecodingProcessorState : public ProcessorState { class EncodingProcessorState : public ProcessorState { public: explicit EncodingProcessorState( - Filter& filter, - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) + Filter& filter, const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) : ProcessorState(filter) { setProcessingModeInternal(mode); } @@ -303,23 
+300,23 @@ class EncodingProcessorState : public ProcessorState { void continueProcessing() const override { encoder_callbacks_->continueEncoding(); } - envoy::service::ext_proc::v3alpha::HttpHeaders* - mutableHeaders(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpHeaders* + mutableHeaders(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_response_headers(); } - envoy::service::ext_proc::v3alpha::HttpBody* - mutableBody(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpBody* + mutableBody(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_response_body(); } - envoy::service::ext_proc::v3alpha::HttpTrailers* - mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpTrailers* + mutableTrailers(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_response_trailers(); } void setProcessingMode( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) override { + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) override { setProcessingModeInternal(mode); } @@ -328,7 +325,7 @@ class EncodingProcessorState : public ProcessorState { private: void setProcessingModeInternal( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode); + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode); Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; }; diff --git a/source/extensions/filters/http/grpc_http1_bridge/BUILD b/source/extensions/filters/http/grpc_http1_bridge/BUILD index c812c4e4c9907..6b0c58125b6a2 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_bridge/BUILD 
@@ -33,12 +33,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # Legacy test use. TODO(#9953) clean up. - extra_visibility = [ - "//source/exe:__pkg__", - "//test/integration:__subpackages__", - "//test/server:__subpackages__", - ], deps = [ "//envoy/registry", "//envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index b1b1cd319a404..32014e2ed3d81 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -214,6 +214,7 @@ JsonTranscoderConfig::JsonTranscoderConfig( default: NOT_REACHED_GCOVR_EXCL_LINE; } + pmb.SetQueryParamUnescapePlus(proto_config.query_param_unescape_plus()); path_matcher_ = pmb.Build(); diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc index 841e56989eeae..7280a419d21da 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc @@ -275,7 +275,7 @@ class GrpcStatsFilter : public Http::PassThroughFilter { Grpc::FrameInspector response_counter_; Upstream::ClusterInfoConstSharedPtr cluster_; absl::optional request_names_; -}; // namespace +}; } // namespace diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index 1ecafee6b332c..3841dd7b6cc7b 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -37,12 +37,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # Legacy test use. TODO(#9953) clean up. 
- extra_visibility = [ - "//test/common/filter:__subpackages__", - "//test/integration:__subpackages__", - "//test/server:__subpackages__", - ], deps = [ "//envoy/registry", "//source/common/http:header_utility_lib", diff --git a/source/extensions/filters/http/health_check/config.cc b/source/extensions/filters/http/health_check/config.cc index faad4fce070c3..3cf363721e65b 100644 --- a/source/extensions/filters/http/health_check/config.cc +++ b/source/extensions/filters/http/health_check/config.cc @@ -33,7 +33,7 @@ Http::FilterFactoryCb HealthCheckFilterConfig::createFilterFactoryFromProtoTyped HealthCheckCacheManagerSharedPtr cache_manager; if (cache_time_ms > 0) { cache_manager = std::make_shared( - context.dispatcher(), std::chrono::milliseconds(cache_time_ms)); + context.mainThreadDispatcher(), std::chrono::milliseconds(cache_time_ms)); } ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages; diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index ef17e90fb1a6a..6d7a70179346d 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -43,9 +43,10 @@ class AuthenticatorImpl : public Logger::Loggable, // Following functions are for JwksFetcher::JwksReceiver interface void onJwksSuccess(google::jwt_verify::JwksPtr&& jwks) override; void onJwksError(Failure reason) override; - // Following functions are for Authenticator interface + // Following functions are for Authenticator interface. 
void verify(Http::HeaderMap& headers, Tracing::Span& parent_span, - std::vector&& tokens, SetPayloadCallback set_payload_cb, + std::vector&& tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback) override; void onDestroy() override; @@ -90,8 +91,8 @@ class AuthenticatorImpl : public Logger::Loggable, Http::HeaderMap* headers_{}; // The active span for the request Tracing::Span* parent_span_{&Tracing::NullSpan::instance()}; - // the callback function to set payload - SetPayloadCallback set_payload_cb_; + // The callback function called to set the extracted payload and header from a verified JWT. + SetExtractedJwtDataCallback set_extracted_jwt_data_cb_; // The on_done function. AuthenticatorCallback callback_; // check audience object. @@ -119,12 +120,13 @@ std::string AuthenticatorImpl::name() const { void AuthenticatorImpl::verify(Http::HeaderMap& headers, Tracing::Span& parent_span, std::vector&& tokens, - SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) { + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, + AuthenticatorCallback callback) { ASSERT(!callback_); headers_ = &headers; parent_span_ = &parent_span; tokens_ = std::move(tokens); - set_payload_cb_ = std::move(set_payload_cb); + set_extracted_jwt_data_cb_ = std::move(set_extracted_jwt_data_cb); callback_ = std::move(callback); ENVOY_LOG(debug, "{}: JWT authentication starts (allow_failed={}), tokens size={}", name(), @@ -291,8 +293,15 @@ void AuthenticatorImpl::handleGoodJwt(bool cache_hit) { // Remove JWT from headers. 
curr_token_->removeJwt(*headers_); } - if (set_payload_cb_ && !provider.payload_in_metadata().empty()) { - set_payload_cb_(provider.payload_in_metadata(), jwt_->payload_pb_); + + if (set_extracted_jwt_data_cb_) { + if (!provider.header_in_metadata().empty()) { + set_extracted_jwt_data_cb_(provider.header_in_metadata(), jwt_->header_pb_); + } + + if (!provider.payload_in_metadata().empty()) { + set_extracted_jwt_data_cb_(provider.payload_in_metadata(), jwt_->payload_pb_); + } } if (provider_ && !cache_hit) { // move the ownership of "owned_jwt_" into the function. diff --git a/source/extensions/filters/http/jwt_authn/authenticator.h b/source/extensions/filters/http/jwt_authn/authenticator.h index 62803498d6941..8e16c5044ce4e 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.h +++ b/source/extensions/filters/http/jwt_authn/authenticator.h @@ -19,7 +19,8 @@ using AuthenticatorPtr = std::unique_ptr; using AuthenticatorCallback = std::function; -using SetPayloadCallback = std::function; +using SetExtractedJwtDataCallback = + std::function; /** * Authenticator object to handle all JWT authentication flow. @@ -31,7 +32,8 @@ class Authenticator { // Verify if headers satisfies the JWT requirements. Can be limited to single provider with // extract_param. virtual void verify(Http::HeaderMap& headers, Tracing::Span& parent_span, - std::vector&& tokens, SetPayloadCallback set_payload_cb, + std::vector&& tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback) PURE; // Called when the object is about to be destroyed. 
diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index bfa03f0ac4f05..6ae48e09c6e07 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -119,7 +119,6 @@ class JwtCookieLocation : public JwtLocationBase { void removeJwt(Http::HeaderMap&) const override { // TODO(theshubhamp): remove JWT from cookies. - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } }; diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index c069e3a87b8d6..768e58e73d351 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -100,8 +100,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } -void Filter::setPayload(const ProtobufWkt::Struct& payload) { - decoder_callbacks_->streamInfo().setDynamicMetadata("envoy.filters.http.jwt_authn", payload); +void Filter::setExtractedData(const ProtobufWkt::Struct& extracted_data) { + decoder_callbacks_->streamInfo().setDynamicMetadata("envoy.filters.http.jwt_authn", + extracted_data); } void Filter::onComplete(const Status& status) { diff --git a/source/extensions/filters/http/jwt_authn/filter.h b/source/extensions/filters/http/jwt_authn/filter.h index e743324f8040a..9330d07be6126 100644 --- a/source/extensions/filters/http/jwt_authn/filter.h +++ b/source/extensions/filters/http/jwt_authn/filter.h @@ -31,8 +31,8 @@ class Filter : public Http::StreamDecoderFilter, private: // Following two functions are for Verifier::Callbacks interface. - // Pass the payload as Struct. - void setPayload(const ProtobufWkt::Struct& payload) override; + // Pass the extracted data from a verified JWT as an opaque ProtobufWkt::Struct. 
+ void setExtractedData(const ProtobufWkt::Struct& extracted_data) override; // It will be called when its verify() call is completed. void onComplete(const ::google::jwt_verify::Status& status) override; diff --git a/source/extensions/filters/http/jwt_authn/filter_config.cc b/source/extensions/filters/http/jwt_authn/filter_config.cc index 8d2874290678f..8ead6c17fd52a 100644 --- a/source/extensions/filters/http/jwt_authn/filter_config.cc +++ b/source/extensions/filters/http/jwt_authn/filter_config.cc @@ -15,7 +15,7 @@ FilterConfigImpl::FilterConfigImpl( envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) : proto_config_(std::move(proto_config)), stats_(generateStats(stats_prefix, context.scope())), - cm_(context.clusterManager()), time_source_(context.dispatcher().timeSource()) { + cm_(context.clusterManager()), time_source_(context.mainThreadDispatcher().timeSource()) { ENVOY_LOG(debug, "Loaded JwtAuthConfig: {}", proto_config_.DebugString()); diff --git a/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc b/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc index 9521fc214cb63..5e1acb2763705 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc +++ b/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc @@ -28,7 +28,8 @@ JwksAsyncFetcher::JwksAsyncFetcher(const RemoteJwks& remote_jwks, return; } - cache_duration_timer_ = context_.dispatcher().createTimer([this]() -> void { fetch(); }); + cache_duration_timer_ = + context_.mainThreadDispatcher().createTimer([this]() -> void { fetch(); }); // For fast_listener, just trigger a fetch, not register with init_manager. 
if (remote_jwks_.async_fetch().fast_listener()) { diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 7eb1cfcab9d85..3439396a0fbed 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -159,7 +159,6 @@ MatcherConstPtr Matcher::create(const RequirementRule& rule) { return std::make_unique(rule); case RouteMatch::PathSpecifierCase::kPath: return std::make_unique(rule); - case RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex: case RouteMatch::PathSpecifierCase::kSafeRegex: return std::make_unique(rule); case RouteMatch::PathSpecifierCase::kConnectMatcher: diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index c22c446cc98e7..6c6449cced31e 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -55,14 +55,14 @@ class ContextImpl : public Verifier::Context { // Stores an authenticator object for this request. void storeAuth(AuthenticatorPtr&& auth) { auths_.emplace_back(std::move(auth)); } - // Add a pair of (name, payload), called by Authenticator - void addPayload(const std::string& name, const ProtobufWkt::Struct& payload) { - *(*payload_.mutable_fields())[name].mutable_struct_value() = payload; + // Add a pair of (name, payload), called by Authenticator. It can be either JWT header or payload. 
+ void addExtractedData(const std::string& name, const ProtobufWkt::Struct& extracted_data) { + *(*extracted_data_.mutable_fields())[name].mutable_struct_value() = extracted_data; } - void setPayload() { - if (!payload_.fields().empty()) { - callback_->setPayload(payload_); + void setExtractedData() { + if (!extracted_data_.fields().empty()) { + callback_->setExtractedData(extracted_data_); } } @@ -72,7 +72,7 @@ class ContextImpl : public Verifier::Context { Verifier::Callbacks* callback_; absl::node_hash_map completion_states_; std::vector auths_; - ProtobufWkt::Struct payload_; + ProtobufWkt::Struct extracted_data_; }; // base verifier for provider_name, provider_and_audiences, and allow_missing_or_failed. @@ -88,7 +88,8 @@ class BaseVerifierImpl : public Logger::Loggable, public Verifi } if (Status::Ok == status) { - context.setPayload(); + // We only set the extracted data to context when the JWT is verified. + context.setExtractedData(); } context.callback()->onComplete(status); context.cancel(); @@ -123,8 +124,8 @@ class ProviderVerifierImpl : public BaseVerifierImpl { extractor_->sanitizePayloadHeaders(ctximpl.headers()); auth->verify( ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& extracted_data) { + ctximpl.addExtractedData(name, extracted_data); }, [this, context](const Status& status) { onComplete(status, static_cast(*context)); @@ -174,8 +175,8 @@ class AllowFailedVerifierImpl : public BaseVerifierImpl { extractor_->sanitizePayloadHeaders(ctximpl.headers()); auth->verify( ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); + 
ctximpl.addExtractedData(name, extracted_data); }, [this, context](const Status& status) { onComplete(status, static_cast(*context)); @@ -209,8 +210,8 @@ class AllowMissingVerifierImpl : public BaseVerifierImpl { extractor_->sanitizePayloadHeaders(ctximpl.headers()); auth->verify( ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& extracted_data) { + ctximpl.addExtractedData(name, extracted_data); }, [this, context](const Status& status) { onComplete(status, static_cast(*context)); diff --git a/source/extensions/filters/http/jwt_authn/verifier.h b/source/extensions/filters/http/jwt_authn/verifier.h index 2b62ed4bcec90..7d20e709660a8 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.h +++ b/source/extensions/filters/http/jwt_authn/verifier.h @@ -32,7 +32,7 @@ class Verifier { * This function is called before onComplete() function. * It will not be called if no payload to write. */ - virtual void setPayload(const ProtobufWkt::Struct& payload) PURE; + virtual void setExtractedData(const ProtobufWkt::Struct& payload) PURE; /** * Called on completion of request. 
diff --git a/source/extensions/filters/http/local_ratelimit/config.cc b/source/extensions/filters/http/local_ratelimit/config.cc index 15c7107276960..d70e8f8aac49c 100644 --- a/source/extensions/filters/http/local_ratelimit/config.cc +++ b/source/extensions/filters/http/local_ratelimit/config.cc @@ -16,7 +16,8 @@ Http::FilterFactoryCb LocalRateLimitFilterConfig::createFilterFactoryFromProtoTy const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { FilterConfigSharedPtr filter_config = std::make_shared( - proto_config, context.localInfo(), context.dispatcher(), context.scope(), context.runtime()); + proto_config, context.localInfo(), context.mainThreadDispatcher(), context.scope(), + context.runtime()); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config)); }; @@ -27,7 +28,7 @@ LocalRateLimitFilterConfig::createRouteSpecificFilterConfigTyped( const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { return std::make_shared(proto_config, context.localInfo(), - context.dispatcher(), context.scope(), + context.mainThreadDispatcher(), context.scope(), context.runtime(), true); } diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index 5b102a5e2a6c0..83116b2f00c31 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -44,7 +44,6 @@ envoy_cc_library( "//source/common/crypto:utility_lib", "//source/common/http:header_utility_lib", "//source/common/http:utility_lib", - "//source/extensions/common/crypto:utility_lib", "//source/extensions/filters/common/lua:lua_lib", "//source/extensions/filters/common/lua:wrappers_lib", ], diff --git 
a/source/extensions/filters/http/lua/config.cc b/source/extensions/filters/http/lua/config.cc index 6ed6ba6ddcd98..822eecc2c773a 100644 --- a/source/extensions/filters/http/lua/config.cc +++ b/source/extensions/filters/http/lua/config.cc @@ -16,7 +16,7 @@ Http::FilterFactoryCb LuaFilterConfig::createFilterFactoryFromProtoTyped( Server::Configuration::FactoryContext& context) { FilterConfigConstSharedPtr filter_config(new FilterConfig{ proto_config, context.threadLocal(), context.clusterManager(), context.api()}); - auto& time_source = context.dispatcher().timeSource(); + auto& time_source = context.mainThreadDispatcher().timeSource(); return [filter_config, &time_source](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config, time_source)); }; diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 5c78826978676..9be9dd3d49b4a 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -9,6 +9,7 @@ #include "source/common/common/assert.h" #include "source/common/common/enum_to_int.h" #include "source/common/config/datasource.h" +#include "source/common/crypto/crypto_impl.h" #include "source/common/crypto/utility.h" #include "source/common/http/message_impl.h" @@ -552,37 +553,37 @@ int StreamHandleWrapper::luaConnection(lua_State* state) { } int StreamHandleWrapper::luaLogTrace(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::trace, message); return 0; } int StreamHandleWrapper::luaLogDebug(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::debug, message); return 0; } int 
StreamHandleWrapper::luaLogInfo(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::info, message); return 0; } int StreamHandleWrapper::luaLogWarn(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::warn, message); return 0; } int StreamHandleWrapper::luaLogErr(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::err, message); return 0; } int StreamHandleWrapper::luaLogCritical(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::critical, message); return 0; } @@ -649,9 +650,8 @@ int StreamHandleWrapper::luaImportPublicKey(lua_State* state) { } int StreamHandleWrapper::luaBase64Escape(lua_State* state) { - size_t input_size; - const char* input = luaL_checklstring(state, 2, &input_size); - auto output = absl::Base64Escape(absl::string_view(input, input_size)); + absl::string_view input = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + auto output = absl::Base64Escape(input); lua_pushlstring(state, output.data(), output.length()); return 1; @@ -700,7 +700,7 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& FilterConfigPerRoute::FilterConfigPerRoute( const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config, Server::Configuration::ServerFactoryContext& context) - : main_thread_dispatcher_(context.dispatcher()), disabled_(config.disabled()), + : main_thread_dispatcher_(context.mainThreadDispatcher()), disabled_(config.disabled()), 
name_(config.name()) { if (disabled_ || !name_.empty()) { return; @@ -783,7 +783,7 @@ void Filter::scriptError(const Filters::Common::Lua::LuaException& e) { response_stream_wrapper_.reset(); } -void Filter::scriptLog(spdlog::level::level_enum level, const char* message) { +void Filter::scriptLog(spdlog::level::level_enum level, absl::string_view message) { switch (level) { case spdlog::level::trace: ENVOY_LOG(trace, "script log: {}", message); diff --git a/source/extensions/filters/http/lua/lua_filter.h b/source/extensions/filters/http/lua/lua_filter.h index e779d9fdf4245..17c9338627965 100644 --- a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -441,7 +441,7 @@ class Filter : public Http::StreamFilter, Logger::Loggable { Upstream::ClusterManager& clusterManager() { return config_->cluster_manager_; } void scriptError(const Filters::Common::Lua::LuaException& e); - virtual void scriptLog(spdlog::level::level_enum level, const char* message); + virtual void scriptLog(spdlog::level::level_enum level, absl::string_view message); // Http::StreamFilterBase void onDestroy() override; diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index 3f2c02a2fe5da..19a6d025ed2cb 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -41,8 +41,8 @@ int HeaderMapWrapper::luaAdd(lua_State* state) { } int HeaderMapWrapper::luaGet(lua_State* state) { - const char* key = luaL_checkstring(state, 2); - const auto value = + absl::string_view key = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + const Http::HeaderUtility::GetAllOfHeaderAsStringResult value = Http::HeaderUtility::getAllOfHeaderAsString(headers_, Http::LowerCaseString(key)); if (value.result().has_value()) { lua_pushlstring(state, value.result().value().data(), value.result().value().length()); @@ -52,6 +52,25 @@ int 
HeaderMapWrapper::luaGet(lua_State* state) { } } +int HeaderMapWrapper::luaGetAtIndex(lua_State* state) { + absl::string_view key = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + const int index = luaL_checknumber(state, 3); + const Http::HeaderMap::GetResult header_value = headers_.get(Http::LowerCaseString(key)); + if (index >= 0 && header_value.size() > static_cast(index)) { + absl::string_view value = header_value[index]->value().getStringView(); + lua_pushlstring(state, value.data(), value.length()); + return 1; + } + return 0; +} + +int HeaderMapWrapper::luaGetNumValues(lua_State* state) { + absl::string_view key = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + const Http::HeaderMap::GetResult header_value = headers_.get(Http::LowerCaseString(key)); + lua_pushnumber(state, header_value.size()); + return 1; +} + int HeaderMapWrapper::luaPairs(lua_State* state) { if (iterator_.get() != nullptr) { luaL_error(state, "cannot create a second iterator before completing the first"); diff --git a/source/extensions/filters/http/lua/wrappers.h b/source/extensions/filters/http/lua/wrappers.h index e04ca24f47392..25af8afae1fe3 100644 --- a/source/extensions/filters/http/lua/wrappers.h +++ b/source/extensions/filters/http/lua/wrappers.h @@ -4,7 +4,6 @@ #include "envoy/stream_info/stream_info.h" #include "source/common/crypto/utility.h" -#include "source/extensions/common/crypto/crypto_impl.h" #include "source/extensions/filters/common/lua/lua.h" #include "source/extensions/filters/common/lua/wrappers.h" @@ -47,6 +46,8 @@ class HeaderMapWrapper : public Filters::Common::Lua::BaseLuaObject #include "envoy/common/exception.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.validate.h" #include "envoy/registry/registry.h" #include "envoy/secret/secret_manager.h" #include "envoy/secret/secret_provider.h" @@ -37,7 +37,7 @@ secretsProvider(const 
envoy::extensions::transport_sockets::tls::v3::SdsSecretCo } // namespace Http::FilterFactoryCb OAuth2Config::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2& proto, + const envoy::extensions::filters::http::oauth2::v3::OAuth2& proto, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { if (!proto.has_config()) { throw EnvoyException("config must be present for global config"); diff --git a/source/extensions/filters/http/oauth2/config.h b/source/extensions/filters/http/oauth2/config.h index 10f4fbc8b84d0..6db0c0d480529 100644 --- a/source/extensions/filters/http/oauth2/config.h +++ b/source/extensions/filters/http/oauth2/config.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -13,13 +13,14 @@ namespace HttpFilters { namespace Oauth2 { class OAuth2Config : public Extensions::HttpFilters::Common::FactoryBase< - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2> { + envoy::extensions::filters::http::oauth2::v3::OAuth2> { public: OAuth2Config() : FactoryBase("envoy.filters.http.oauth2") {} - Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2&, const std::string&, - Server::Configuration::FactoryContext&) override; + Http::FilterFactoryCb + createFilterFactoryFromProtoTyped(const envoy::extensions::filters::http::oauth2::v3::OAuth2&, + const std::string&, + Server::Configuration::FactoryContext&) override; }; } // namespace Oauth2 diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index d0234dd733b09..78c8a16d83e9e 100644 --- 
a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -116,7 +116,7 @@ std::string findValue(const absl::flat_hash_map& map, } // namespace FilterConfig::FilterConfig( - const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config& proto_config, + const envoy::extensions::filters::http::oauth2::v3::OAuth2Config& proto_config, Upstream::ClusterManager& cluster_manager, std::shared_ptr secret_reader, Stats::Scope& scope, const std::string& stats_prefix) : oauth_token_endpoint_(proto_config.token_endpoint()), diff --git a/source/extensions/filters/http/oauth2/filter.h b/source/extensions/filters/http/oauth2/filter.h index e2e4b88be3b86..6722845fddaae 100644 --- a/source/extensions/filters/http/oauth2/filter.h +++ b/source/extensions/filters/http/oauth2/filter.h @@ -7,7 +7,7 @@ #include "envoy/common/callback.h" #include "envoy/common/matchers.h" #include "envoy/config/core/v3/http_uri.pb.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" #include "envoy/http/header_map.h" #include "envoy/server/filter_config.h" #include "envoy/stats/stats_macros.h" @@ -100,7 +100,7 @@ struct FilterStats { */ class FilterConfig { public: - FilterConfig(const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config& proto_config, + FilterConfig(const envoy::extensions::filters::http::oauth2::v3::OAuth2Config& proto_config, Upstream::ClusterManager& cluster_manager, std::shared_ptr secret_reader, Stats::Scope& scope, const std::string& stats_prefix); diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 538c4f7425c04..c007471d7779c 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -159,6 +159,8 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, 
cluster_->statsScope().counterFromStatName(stat_names.ok_).inc(); break; case Filters::Common::RateLimit::LimitStatus::Error: + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::filter), debug, + "rate limit status, status={}", status); cluster_->statsScope().counterFromStatName(stat_names.error_).inc(); break; case Filters::Common::RateLimit::LimitStatus::OverLimit: diff --git a/source/extensions/filters/http/rbac/BUILD b/source/extensions/filters/http/rbac/BUILD index 603f17ef8a4ff..39d9e481a1095 100644 --- a/source/extensions/filters/http/rbac/BUILD +++ b/source/extensions/filters/http/rbac/BUILD @@ -31,6 +31,7 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/extensions/filters/common/rbac:engine_lib", "//source/extensions/filters/common/rbac:utility_lib", + "//source/extensions/filters/common/rbac/matchers:upstream_ip_port_lib", "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/rbac/config.cc b/source/extensions/filters/http/rbac/config.cc index e389ee1030f13..1559328594fa0 100644 --- a/source/extensions/filters/http/rbac/config.cc +++ b/source/extensions/filters/http/rbac/config.cc @@ -15,8 +15,8 @@ Http::FilterFactoryCb RoleBasedAccessControlFilterConfigFactory::createFilterFac const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { - auto config = std::make_shared(proto_config, stats_prefix, - context.scope()); + auto config = std::make_shared( + proto_config, stats_prefix, context.scope(), context.messageValidationVisitor()); return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamDecoderFilter(std::make_shared(config)); @@ -26,8 +26,9 @@ Http::FilterFactoryCb RoleBasedAccessControlFilterConfigFactory::createFilterFac Router::RouteSpecificFilterConfigConstSharedPtr 
RoleBasedAccessControlFilterConfigFactory::createRouteSpecificFilterConfigTyped( const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& proto_config, - Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) { - return std::make_shared(proto_config); + Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor& validator) { + return std::make_shared(proto_config, + validator); } /** diff --git a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc index da1d64d1f520a..85669c2e3a9c8 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.cc +++ b/source/extensions/filters/http/rbac/rbac_filter.cc @@ -14,12 +14,13 @@ namespace RBACFilter { RoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig( const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config, - const std::string& stats_prefix, Stats::Scope& scope) + const std::string& stats_prefix, Stats::Scope& scope, + ProtobufMessage::ValidationVisitor& validation_visitor) : stats_(Filters::Common::RBAC::generateStats(stats_prefix, proto_config.shadow_rules_stat_prefix(), scope)), shadow_rules_stat_prefix_(proto_config.shadow_rules_stat_prefix()), - engine_(Filters::Common::RBAC::createEngine(proto_config)), - shadow_engine_(Filters::Common::RBAC::createShadowEngine(proto_config)) {} + engine_(Filters::Common::RBAC::createEngine(proto_config, validation_visitor)), + shadow_engine_(Filters::Common::RBAC::createShadowEngine(proto_config, validation_visitor)) {} const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl* RoleBasedAccessControlFilterConfig::engine(const Router::RouteConstSharedPtr route, @@ -35,9 +36,15 @@ RoleBasedAccessControlFilterConfig::engine(const Router::RouteConstSharedPtr rou } RoleBasedAccessControlRouteSpecificFilterConfig::RoleBasedAccessControlRouteSpecificFilterConfig( - const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& per_route_config) 
- : engine_(Filters::Common::RBAC::createEngine(per_route_config.rbac())), - shadow_engine_(Filters::Common::RBAC::createShadowEngine(per_route_config.rbac())) {} + const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& per_route_config, + ProtobufMessage::ValidationVisitor& validation_visitor) { + // Moved from member initializer to ctor body to overcome clang false warning about memory + // leak (clang-analyzer-cplusplus.NewDeleteLeaks,-warnings-as-errors). + // Potentially https://lists.llvm.org/pipermail/llvm-bugs/2018-July/066769.html + engine_ = Filters::Common::RBAC::createEngine(per_route_config.rbac(), validation_visitor); + shadow_engine_ = + Filters::Common::RBAC::createShadowEngine(per_route_config.rbac(), validation_visitor); +} Http::FilterHeadersStatus RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { diff --git a/source/extensions/filters/http/rbac/rbac_filter.h b/source/extensions/filters/http/rbac/rbac_filter.h index 41d38a9a37c1e..a29fa532dc073 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.h +++ b/source/extensions/filters/http/rbac/rbac_filter.h @@ -19,7 +19,8 @@ namespace RBACFilter { class RoleBasedAccessControlRouteSpecificFilterConfig : public Router::RouteSpecificFilterConfig { public: RoleBasedAccessControlRouteSpecificFilterConfig( - const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& per_route_config); + const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& per_route_config, + ProtobufMessage::ValidationVisitor& validation_visitor); const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl* engine(Filters::Common::RBAC::EnforcementMode mode) const { @@ -39,7 +40,8 @@ class RoleBasedAccessControlFilterConfig { public: RoleBasedAccessControlFilterConfig( const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config, - const std::string& stats_prefix, Stats::Scope& scope); + const std::string& stats_prefix, Stats::Scope& scope, + 
ProtobufMessage::ValidationVisitor& validation_visitor); Filters::Common::RBAC::RoleBasedAccessControlFilterStats& stats() { return stats_; } std::string shadowEffectivePolicyIdField() const { diff --git a/source/extensions/filters/http/tap/config.cc b/source/extensions/filters/http/tap/config.cc index ab992a89a40ce..5c573924f768e 100644 --- a/source/extensions/filters/http/tap/config.cc +++ b/source/extensions/filters/http/tap/config.cc @@ -26,9 +26,10 @@ class HttpTapConfigFactoryImpl : public Extensions::Common::Tap::TapConfigFactor Http::FilterFactoryCb TapFilterFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::tap::v3::Tap& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { - FilterConfigSharedPtr filter_config(new FilterConfigImpl( - proto_config, stats_prefix, std::make_unique(), context.scope(), - context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher())); + FilterConfigSharedPtr filter_config( + new FilterConfigImpl(proto_config, stats_prefix, std::make_unique(), + context.scope(), context.admin(), context.singletonManager(), + context.threadLocal(), context.mainThreadDispatcher())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { auto filter = std::make_shared(filter_config); callbacks.addStreamFilter(filter); diff --git a/source/extensions/filters/http/wasm/config.cc b/source/extensions/filters/http/wasm/config.cc index 06a02611e6cc7..e50184df677a9 100644 --- a/source/extensions/filters/http/wasm/config.cc +++ b/source/extensions/filters/http/wasm/config.cc @@ -16,6 +16,8 @@ namespace Wasm { Http::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); 
auto filter_config = std::make_shared(proto_config, context); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { auto filter = filter_config->createFilter(); diff --git a/source/extensions/filters/http/wasm/wasm_filter.cc b/source/extensions/filters/http/wasm/wasm_filter.cc index 4296be89d40b3..75e06e69b735a 100644 --- a/source/extensions/filters/http/wasm/wasm_filter.cc +++ b/source/extensions/filters/http/wasm/wasm_filter.cc @@ -21,8 +21,8 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::http::wasm::v3::Was }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm HTTP filter {}", plugin->name_)); diff --git a/source/extensions/filters/listener/original_dst/original_dst.cc b/source/extensions/filters/listener/original_dst/original_dst.cc index ccb0f93a5ff06..9364b55c79765 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.cc +++ b/source/extensions/filters/listener/original_dst/original_dst.cc @@ -17,7 +17,7 @@ Network::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(Netwo } Network::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbacks& cb) { - ENVOY_LOG(debug, "original_dst: New connection accepted"); + ENVOY_LOG(debug, "original_dst: new connection accepted"); Network::ConnectionSocket& socket = cb.socket(); if (socket.addressType() == Network::Address::Type::Ip) { diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 0d8082a079fe2..aed47748e9aed 100644 --- 
a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -64,7 +64,7 @@ const KeyValuePair* Config::isTlvTypeNeeded(uint8_t type) const { size_t Config::numberOfNeededTlvTypes() const { return tlv_types_.size(); } Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { - ENVOY_LOG(debug, "proxy_protocol: New connection accepted"); + ENVOY_LOG(debug, "proxy_protocol: new connection accepted"); Network::ConnectionSocket& socket = cb.socket(); socket.ioHandle().initializeFileEvent( cb.dispatcher(), @@ -456,7 +456,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { if (buf_off_ < PROXY_PROTO_V2_HEADER_LEN) { ssize_t exp = PROXY_PROTO_V2_HEADER_LEN - buf_off_; const auto read_result = io_handle.recv(buf_ + buf_off_, exp, 0); - if (!result.ok() || read_result.return_value_ != uint64_t(exp)) { + if (!read_result.ok() || read_result.return_value_ != uint64_t(exp)) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; } @@ -478,7 +478,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { if (ssize_t(buf_off_) + nread >= PROXY_PROTO_V2_HEADER_LEN + addr_len) { ssize_t missing = (PROXY_PROTO_V2_HEADER_LEN + addr_len) - buf_off_; const auto read_result = io_handle.recv(buf_ + buf_off_, missing, 0); - if (!result.ok() || read_result.return_value_ != uint64_t(missing)) { + if (!read_result.ok() || read_result.return_value_ != uint64_t(missing)) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; } diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 669cc315ed3e0..098add77b56a4 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ 
b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -22,6 +22,8 @@ namespace Extensions { namespace NetworkFilters { namespace ClientSslAuth { +constexpr absl::string_view AuthDigestNoMatch = "auth_digest_no_match"; + ClientSslAuthConfig::ClientSslAuthConfig( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, @@ -121,6 +123,9 @@ void ClientSslAuthFilter::onEvent(Network::ConnectionEvent event) { if (!config_->allowedPrincipals().allowed( read_callbacks_->connection().ssl()->sha256PeerCertificateDigest())) { + read_callbacks_->connection().streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UpstreamProtocolError); + read_callbacks_->connection().streamInfo().setResponseCodeDetails(AuthDigestNoMatch); config_->stats().auth_digest_no_match_.inc(); read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); return; diff --git a/source/extensions/filters/network/client_ssl_auth/config.cc b/source/extensions/filters/network/client_ssl_auth/config.cc index f974b275274fa..dcd2db9f47a43 100644 --- a/source/extensions/filters/network/client_ssl_auth/config.cc +++ b/source/extensions/filters/network/client_ssl_auth/config.cc @@ -19,7 +19,7 @@ Network::FilterFactoryCb ClientSslAuthConfigFactory::createFilterFactoryFromProt ASSERT(!proto_config.stat_prefix().empty()); ClientSslAuthConfigSharedPtr filter_config(ClientSslAuthConfig::create( - proto_config, context.threadLocal(), context.clusterManager(), context.dispatcher(), + proto_config, context.threadLocal(), context.clusterManager(), context.mainThreadDispatcher(), context.scope(), context.api().randomGenerator())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared(filter_config)); diff --git a/source/extensions/filters/network/dubbo_proxy/config.cc 
b/source/extensions/filters/network/dubbo_proxy/config.cc index 8bc19cd46663a..ad379489f362f 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.cc +++ b/source/extensions/filters/network/dubbo_proxy/config.cc @@ -22,8 +22,9 @@ Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromP std::shared_ptr filter_config(std::make_shared(proto_config, context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addReadFilter(std::make_shared( - *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource())); + filter_manager.addReadFilter( + std::make_shared(*filter_config, context.api().randomGenerator(), + context.mainThreadDispatcher().timeSource())); }; } diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 53223b12160c3..42e3da563f829 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -151,8 +151,8 @@ Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryCont std::shared_ptr date_provider = context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(date_provider), [&context] { - return std::make_shared(context.dispatcher(), - context.threadLocal()); + return std::make_shared( + context.mainThreadDispatcher(), context.threadLocal()); }); Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager = @@ -227,7 +227,7 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoAndHopByHo auto hcm = std::make_shared( *filter_config, context.drainDecision(), context.api().randomGenerator(), context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), - context.overloadManager(), context.dispatcher().timeSource()); + context.overloadManager(), 
context.mainThreadDispatcher().timeSource()); if (!clear_hop_by_hop_headers) { hcm->setClearHopByHopResponseHeaders(false); } @@ -826,7 +826,7 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( auto conn_manager = std::make_unique( *filter_config, context.drainDecision(), context.api().randomGenerator(), context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), - context.overloadManager(), context.dispatcher().timeSource()); + context.overloadManager(), context.mainThreadDispatcher().timeSource()); if (!clear_hop_by_hop_headers) { conn_manager->setClearHopByHopResponseHeaders(false); } diff --git a/source/extensions/filters/network/local_ratelimit/config.cc b/source/extensions/filters/network/local_ratelimit/config.cc index e9d2c907bd102..0ae1e20adfb98 100644 --- a/source/extensions/filters/network/local_ratelimit/config.cc +++ b/source/extensions/filters/network/local_ratelimit/config.cc @@ -14,7 +14,7 @@ Network::FilterFactoryCb LocalRateLimitConfigFactory::createFilterFactoryFromPro const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config, Server::Configuration::FactoryContext& context) { ConfigSharedPtr filter_config( - new Config(proto_config, context.dispatcher(), context.scope(), context.runtime())); + new Config(proto_config, context.mainThreadDispatcher(), context.scope(), context.runtime())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared(filter_config)); }; diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc index 79597c01087ed..4a1fad3d227ea 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -25,7 +25,7 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP AccessLogSharedPtr access_log; if 
(!proto_config.access_log().empty()) { access_log = std::make_shared(proto_config.access_log(), context.accessLogManager(), - context.dispatcher().timeSource()); + context.mainThreadDispatcher().timeSource()); } Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config; @@ -45,7 +45,8 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP stats](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared( stat_prefix, context.scope(), context.runtime(), access_log, fault_config, - context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata, stats)); + context.drainDecision(), context.mainThreadDispatcher().timeSource(), emit_dynamic_metadata, + stats)); }; } diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 50f8ef0368984..a3a90498d881a 100644 --- a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -16,12 +16,6 @@ envoy_cc_library( name = "ratelimit_lib", srcs = ["ratelimit.cc"], hdrs = ["ratelimit.h"], - # Legacy test use. TODO(#9953) clean up. 
- visibility = [ - "//source/extensions:__subpackages__", - "//test/common/network:__pkg__", - "//test/extensions:__subpackages__", - ], deps = [ "//envoy/network:connection_interface", "//envoy/network:filter_interface", diff --git a/source/extensions/filters/network/rbac/config.cc b/source/extensions/filters/network/rbac/config.cc index b5fc803f5202e..3c95a5987956b 100644 --- a/source/extensions/filters/network/rbac/config.cc +++ b/source/extensions/filters/network/rbac/config.cc @@ -79,7 +79,8 @@ RoleBasedAccessControlNetworkFilterConfigFactory::createFilterFactoryFromProtoTy validateRbacRules(proto_config.rules()); validateRbacRules(proto_config.shadow_rules()); RoleBasedAccessControlFilterConfigSharedPtr config( - std::make_shared(proto_config, context.scope())); + std::make_shared(proto_config, context.scope(), + context.messageValidationVisitor())); return [config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared(config)); }; diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 9bd6b6723c244..6eb21b13495e2 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ -14,12 +14,13 @@ namespace NetworkFilters { namespace RBACFilter { RoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig( - const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config, Stats::Scope& scope) + const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config, Stats::Scope& scope, + ProtobufMessage::ValidationVisitor& validation_visitor) : stats_(Filters::Common::RBAC::generateStats(proto_config.stat_prefix(), proto_config.shadow_rules_stat_prefix(), scope)), shadow_rules_stat_prefix_(proto_config.shadow_rules_stat_prefix()), - engine_(Filters::Common::RBAC::createEngine(proto_config)), - shadow_engine_(Filters::Common::RBAC::createShadowEngine(proto_config)), 
+ engine_(Filters::Common::RBAC::createEngine(proto_config, validation_visitor)), + shadow_engine_(Filters::Common::RBAC::createShadowEngine(proto_config, validation_visitor)), enforcement_type_(proto_config.enforcement_type()) {} Network::FilterStatus RoleBasedAccessControlFilter::onData(Buffer::Instance&, bool) { diff --git a/source/extensions/filters/network/rbac/rbac_filter.h b/source/extensions/filters/network/rbac/rbac_filter.h index d5eabcc2b4737..f43b3853b8884 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.h +++ b/source/extensions/filters/network/rbac/rbac_filter.h @@ -27,7 +27,8 @@ struct Result { class RoleBasedAccessControlFilterConfig { public: RoleBasedAccessControlFilterConfig( - const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config, Stats::Scope& scope); + const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config, Stats::Scope& scope, + ProtobufMessage::ValidationVisitor& validation_visitor); Filters::Common::RBAC::RoleBasedAccessControlFilterStats& stats() { return stats_; } std::string shadowEffectivePolicyIdField() const { diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index b356418325799..bdef53f8fbcb6 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -38,7 +38,7 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager = Extensions::Common::Redis::getClusterRefreshManager( - context.singletonManager(), context.dispatcher(), context.clusterManager(), + context.singletonManager(), context.mainThreadDispatcher(), context.clusterManager(), context.timeSource()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD 
b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index 335b62d0192ca..9549110461d30 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -20,7 +20,7 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/tcp_proxy", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", - "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -33,6 +33,6 @@ envoy_cc_extension( "//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc index dedff7689748e..14c2396cdf459 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc @@ -19,8 +19,7 @@ SniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTy const FilterConfig& proto_config, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), context.api(), - context.runtime(), context.scope(), context.messageValidationVisitor()); + context); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); diff --git 
a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h index f2aea2c4b85fa..628181f775fa0 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h" -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.validate.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.validate.h" #include "source/extensions/filters/network/common/factory_base.h" #include "source/extensions/filters/network/well_known_names.h" @@ -12,7 +12,7 @@ namespace NetworkFilters { namespace SniDynamicForwardProxy { using FilterConfig = - envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3alpha::FilterConfig; + envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3::FilterConfig; /** * Config registration for the sni_dynamic_forward_proxy filter. 
@see diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc index 3c8f6e0798906..b04749aabef10 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc @@ -51,24 +51,21 @@ Network::FilterStatus ProxyFilter::onNewConnection() { } switch (result.status_) { - case LoadDnsCacheEntryStatus::InCache: { + case LoadDnsCacheEntryStatus::InCache: ASSERT(cache_load_handle_ == nullptr); ENVOY_CONN_LOG(debug, "DNS cache entry already loaded, continuing", read_callbacks_->connection()); return Network::FilterStatus::Continue; - } - case LoadDnsCacheEntryStatus::Loading: { + case LoadDnsCacheEntryStatus::Loading: ASSERT(cache_load_handle_ != nullptr); ENVOY_CONN_LOG(debug, "waiting to load DNS cache entry", read_callbacks_->connection()); return Network::FilterStatus::StopIteration; - } - case LoadDnsCacheEntryStatus::Overflow: { + case LoadDnsCacheEntryStatus::Overflow: ASSERT(cache_load_handle_ == nullptr); ENVOY_CONN_LOG(debug, "DNS cache overflow", read_callbacks_->connection()); read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); return Network::FilterStatus::StopIteration; } - } NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h index e7f0de159d20d..23785a275090b 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.h" #include 
"envoy/network/filter.h" #include "envoy/upstream/cluster_manager.h" @@ -13,7 +13,7 @@ namespace NetworkFilters { namespace SniDynamicForwardProxy { using FilterConfig = - envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3alpha::FilterConfig; + envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3::FilterConfig; class ProxyFilterConfig { public: diff --git a/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h b/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h index c15d50e3de9d2..7f931199d31ba 100644 --- a/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h +++ b/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h @@ -6,6 +6,7 @@ #include "source/common/common/fmt.h" #include "source/extensions/filters/network/thrift_proxy/protocol.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" namespace Envoy { namespace Extensions { @@ -33,6 +34,9 @@ class AutoProtocolImpl : public Protocol { bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override; bool readMessageEnd(Buffer::Instance& buffer) override; + bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) override { + return protocol_->peekReplyPayload(buffer, reply_type); + } bool readStructBegin(Buffer::Instance& buffer, std::string& name) override { return protocol_->readStructBegin(buffer, name); } diff --git a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc index 805ec973e0213..961f89a8277a0 100644 --- a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc @@ -60,6 +60,33 @@ bool BinaryProtocolImpl::readMessageEnd(Buffer::Instance& buffer) { return true; } +bool BinaryProtocolImpl::peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) { + // binary protocol 
does not transmit struct names so go straight to peek at field begin + // FieldType::Stop is encoded as 1 byte. + if (buffer.length() < 1) { + return false; + } + + FieldType type = static_cast(buffer.peekInt()); + if (type == FieldType::Stop) { + // If the first field is stop then response is void success + reply_type = ReplyType::Success; + return true; + } + + if (buffer.length() < 3) { + return false; + } + + int16_t id = buffer.peekBEInt(1); + if (id < 0) { + throw EnvoyException(absl::StrCat("invalid binary protocol field id ", id)); + } + // successful response struct in field id 0, error (IDL exception) in field id greater than 0 + reply_type = id == 0 ? ReplyType::Success : ReplyType::Error; + return true; +} + bool BinaryProtocolImpl::readStructBegin(Buffer::Instance& buffer, std::string& name) { UNREFERENCED_PARAMETER(buffer); name.clear(); // binary protocol does not transmit struct names diff --git a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h index a3497aa70a6dd..1550e8d226107 100644 --- a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h +++ b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h @@ -6,6 +6,7 @@ #include "envoy/common/pure.h" #include "source/extensions/filters/network/thrift_proxy/protocol.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" namespace Envoy { namespace Extensions { @@ -25,6 +26,7 @@ class BinaryProtocolImpl : public Protocol { ProtocolType type() const override { return ProtocolType::Binary; } bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override; bool readMessageEnd(Buffer::Instance& buffer) override; + bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) override; bool readStructBegin(Buffer::Instance& buffer, std::string& name) override; bool readStructEnd(Buffer::Instance& buffer) override; bool 
readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type, diff --git a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc index 4bab4da2e625f..3aef9e205aa71 100644 --- a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc @@ -79,6 +79,46 @@ bool CompactProtocolImpl::readMessageEnd(Buffer::Instance& buffer) { return true; } +bool CompactProtocolImpl::peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) { + // compact protocol does not transmit struct names so go straight to peek for field begin + // Minimum size: FieldType::Stop is encoded as 1 byte. + if (buffer.length() < 1) { + return false; + } + + uint8_t delta_and_type = buffer.peekInt(); + if ((delta_and_type & 0x0f) == 0) { + // Type is stop, no need to do further decoding + // If the first field is stop then response is void success + reply_type = ReplyType::Success; + return true; + } + + if ((delta_and_type >> 4) != 0) { + // field id delta is non zero and so is an IDL exception (success field id is 0) + reply_type = ReplyType::Error; + return true; + } + + int id_size = 0; + // Field ID delta is zero: this is a long-form field header, followed by zig-zag field id. + if (buffer.length() < 2) { + return false; + } + + int32_t id = BufferHelper::peekZigZagI32(buffer, 1, id_size); + if (id_size < 0) { + return false; + } + + if (id < 0 || id > std::numeric_limits::max()) { + throw EnvoyException(absl::StrCat("invalid compact protocol field id ", id)); + } + // successful response struct in field id 0, error (IDL exception) in field id greater than 0 + reply_type = id == 0 ? 
ReplyType::Success : ReplyType::Error; + return true; +} + bool CompactProtocolImpl::readStructBegin(Buffer::Instance& buffer, std::string& name) { UNREFERENCED_PARAMETER(buffer); name.clear(); // compact protocol does not transmit struct names diff --git a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h index fb89e613ece3e..a2c978bcf2f86 100644 --- a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h +++ b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include "source/extensions/filters/network/thrift_proxy/protocol.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" #include "absl/types/optional.h" @@ -28,6 +29,7 @@ class CompactProtocolImpl : public Protocol { ProtocolType type() const override { return ProtocolType::Compact; } bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override; bool readMessageEnd(Buffer::Instance& buffer) override; + bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) override; bool readStructBegin(Buffer::Instance& buffer, std::string& name) override; bool readStructEnd(Buffer::Instance& buffer) override; bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type, diff --git a/source/extensions/filters/network/thrift_proxy/config.cc b/source/extensions/filters/network/thrift_proxy/config.cc index ae272ed1b480f..180503969176f 100644 --- a/source/extensions/filters/network/thrift_proxy/config.cc +++ b/source/extensions/filters/network/thrift_proxy/config.cc @@ -102,8 +102,9 @@ Network::FilterFactoryCb ThriftProxyFilterConfigFactory::createFilterFactoryFrom std::shared_ptr filter_config(new ConfigImpl(proto_config, context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addReadFilter(std::make_shared( - 
*filter_config, context.api().randomGenerator(), context.dispatcher().timeSource())); + filter_manager.addReadFilter( + std::make_shared(*filter_config, context.api().randomGenerator(), + context.mainThreadDispatcher().timeSource())); }; } diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index 1e100247e59cc..531340105ca02 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -210,44 +210,19 @@ bool ConnectionManager::ResponseDecoder::onData(Buffer::Instance& data) { return complete_; } +FilterStatus ConnectionManager::ResponseDecoder::passthroughData(Buffer::Instance& data) { + passthrough_ = true; + return ProtocolConverter::passthroughData(data); +} + FilterStatus ConnectionManager::ResponseDecoder::messageBegin(MessageMetadataSharedPtr metadata) { metadata_ = metadata; metadata_->setSequenceId(parent_.original_sequence_id_); - first_reply_field_ = - (metadata->hasMessageType() && metadata->messageType() == MessageType::Reply); - return ProtocolConverter::messageBegin(metadata); -} - -FilterStatus ConnectionManager::ResponseDecoder::fieldBegin(absl::string_view name, - FieldType& field_type, - int16_t& field_id) { - if (first_reply_field_) { - // Reply messages contain a struct where field 0 is the call result and fields 1+ are - // exceptions, if defined. At most one field may be set. Therefore, the very first field we - // encounter in a reply is either field 0 (success) or not (IDL exception returned). - // If first fieldType is FieldType::Stop then it is a void success and handled in messageEnd() - // because decoder state machine does not call decoder event callback fieldBegin on - // FieldType::Stop. 
- success_ = (field_id == 0); - first_reply_field_ = false; - } - - return ProtocolConverter::fieldBegin(name, field_type, field_id); -} - -FilterStatus ConnectionManager::ResponseDecoder::messageEnd() { - if (first_reply_field_) { - // When the response is thrift void type there is never a fieldBegin call on a success - // because the response struct has no fields and so the first field type is FieldType::Stop. - // The decoder state machine handles FieldType::Stop by going immediately to structEnd, - // skipping fieldBegin callback. Therefore if we are still waiting for the first reply field - // at end of message then it is a void success. - success_ = true; - first_reply_field_ = false; + if (metadata->hasReplyType()) { + success_ = metadata->replyType() == ReplyType::Success; } - - return ProtocolConverter::messageEnd(); + return ProtocolConverter::messageBegin(metadata); } FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { @@ -275,14 +250,19 @@ FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { cm.read_callbacks_->connection().write(buffer, false); cm.stats_.response_.inc(); + if (passthrough_) { + cm.stats_.response_passthrough_.inc(); + } switch (metadata_->messageType()) { case MessageType::Reply: cm.stats_.response_reply_.inc(); - if (success_.value_or(false)) { - cm.stats_.response_success_.inc(); - } else { - cm.stats_.response_error_.inc(); + if (success_) { + if (success_.value()) { + cm.stats_.response_success_.inc(); + } else { + cm.stats_.response_error_.inc(); + } } break; @@ -419,6 +399,10 @@ void ConnectionManager::ActiveRpc::finalizeRequest() { parent_.stats_.downstream_cx_max_requests_.inc(); } + if (passthrough_) { + parent_.stats_.request_passthrough_.inc(); + } + bool destroy_rpc = false; switch (original_msg_type_) { case MessageType::Call: @@ -458,6 +442,7 @@ bool ConnectionManager::ActiveRpc::passthroughSupported() const { } FilterStatus ConnectionManager::ActiveRpc::passthroughData(Buffer::Instance& data) { 
+ passthrough_ = true; filter_context_ = &data; filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus { Buffer::Instance* data = absl::any_cast(filter_context_); diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 064d29b050607..789d54aaffe39 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -74,17 +74,15 @@ class ConnectionManager : public Network::ReadFilter, struct ResponseDecoder : public DecoderCallbacks, public ProtocolConverter { ResponseDecoder(ActiveRpc& parent, Transport& transport, Protocol& protocol) : parent_(parent), decoder_(std::make_unique(transport, protocol, *this)), - complete_(false), first_reply_field_(false) { + complete_(false), passthrough_{false} { initProtocolConverter(*parent_.parent_.protocol_, parent_.response_buffer_); } bool onData(Buffer::Instance& data); // ProtocolConverter + FilterStatus passthroughData(Buffer::Instance& data) override; FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; - FilterStatus messageEnd() override; - FilterStatus fieldBegin(absl::string_view name, FieldType& field_type, - int16_t& field_id) override; FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override { UNREFERENCED_PARAMETER(metadata); return FilterStatus::Continue; @@ -101,7 +99,7 @@ class ConnectionManager : public Network::ReadFilter, MessageMetadataSharedPtr metadata_; absl::optional success_; bool complete_ : 1; - bool first_reply_field_ : 1; + bool passthrough_ : 1; }; using ResponseDecoderPtr = std::unique_ptr; @@ -155,7 +153,7 @@ class ConnectionManager : public Network::ReadFilter, stream_id_(parent_.random_generator_.random()), stream_info_(parent_.time_source_, parent_.read_callbacks_->connection().connectionInfoProviderSharedPtr()), - local_response_sent_{false}, pending_transport_end_{false} { + 
local_response_sent_{false}, pending_transport_end_{false}, passthrough_{false} { parent_.stats_.request_active_.inc(); } ~ActiveRpc() override { @@ -245,6 +243,7 @@ class ConnectionManager : public Network::ReadFilter, absl::any filter_context_; bool local_response_sent_ : 1; bool pending_transport_end_ : 1; + bool passthrough_ : 1; }; using ActiveRpcPtr = std::unique_ptr; diff --git a/source/extensions/filters/network/thrift_proxy/decoder.cc b/source/extensions/filters/network/thrift_proxy/decoder.cc index 111703e63da55..66c762a65f006 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.cc +++ b/source/extensions/filters/network/thrift_proxy/decoder.cc @@ -6,6 +6,7 @@ #include "source/common/common/assert.h" #include "source/common/common/macros.h" #include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" namespace Envoy { namespace Extensions { @@ -26,19 +27,29 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::passthroughData(Buffer:: } // MessageBegin -> StructBegin +// MessageBegin -> ReplyPayload (reply received, get reply type) DecoderStateMachine::DecoderStatus DecoderStateMachine::messageBegin(Buffer::Instance& buffer) { const auto total = buffer.length(); if (!proto_.readMessageBegin(buffer, *metadata_)) { return {ProtocolState::WaitForData}; } + body_start_ = total - buffer.length(); stack_.clear(); stack_.emplace_back(Frame(ProtocolState::MessageEnd)); + // If a reply peek at the payload to see if success or error (IDL exception) + if (metadata_->hasMessageType() && metadata_->messageType() == MessageType::Reply) { + return {ProtocolState::ReplyPayload, FilterStatus::Continue}; + } + + return handleMessageBegin(); +} +DecoderStateMachine::DecoderStatus DecoderStateMachine::handleMessageBegin() { const auto status = handler_.messageBegin(metadata_); if (callbacks_.passthroughEnabled()) { - body_bytes_ = metadata_->frameSize() - (total - 
buffer.length()); + body_bytes_ = metadata_->frameSize() - body_start_; return {ProtocolState::PassthroughData, status}; } @@ -54,6 +65,17 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::messageEnd(Buffer::Insta return {ProtocolState::Done, handler_.messageEnd()}; } +// ReplyPayload -> StructBegin +DecoderStateMachine::DecoderStatus DecoderStateMachine::replyPayload(Buffer::Instance& buffer) { + ReplyType reply_type; + if (!proto_.peekReplyPayload(buffer, reply_type)) { + return {ProtocolState::WaitForData}; + } + + metadata_->setReplyType(reply_type); + return handleMessageBegin(); +} + // StructBegin -> FieldBegin DecoderStateMachine::DecoderStatus DecoderStateMachine::structBegin(Buffer::Instance& buffer) { std::string name; @@ -318,6 +340,8 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::handleState(Buffer::Inst return passthroughData(buffer); case ProtocolState::MessageBegin: return messageBegin(buffer); + case ProtocolState::ReplyPayload: + return replyPayload(buffer); case ProtocolState::StructBegin: return structBegin(buffer); case ProtocolState::StructEnd: diff --git a/source/extensions/filters/network/thrift_proxy/decoder.h b/source/extensions/filters/network/thrift_proxy/decoder.h index 99739221c5104..f00bea67baba5 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.h +++ b/source/extensions/filters/network/thrift_proxy/decoder.h @@ -19,6 +19,7 @@ namespace ThriftProxy { FUNCTION(PassthroughData) \ FUNCTION(MessageBegin) \ FUNCTION(MessageEnd) \ + FUNCTION(ReplyPayload) \ FUNCTION(StructBegin) \ FUNCTION(StructEnd) \ FUNCTION(FieldBegin) \ @@ -134,6 +135,7 @@ class DecoderStateMachine : public Logger::Loggable { DecoderStatus passthroughData(Buffer::Instance& buffer); DecoderStatus messageBegin(Buffer::Instance& buffer); DecoderStatus messageEnd(Buffer::Instance& buffer); + DecoderStatus replyPayload(Buffer::Instance& buffer); DecoderStatus structBegin(Buffer::Instance& buffer); DecoderStatus 
structEnd(Buffer::Instance& buffer); DecoderStatus fieldBegin(Buffer::Instance& buffer); @@ -150,6 +152,10 @@ class DecoderStateMachine : public Logger::Loggable { DecoderStatus setValue(Buffer::Instance& buffer); DecoderStatus setEnd(Buffer::Instance& buffer); + // handleMessageBegin calls the handler for messageBegin and then determines whether to + // perform payload passthrough or not + DecoderStatus handleMessageBegin(); + // handleValue represents the generic Value state from the state machine documentation. It // returns either ProtocolState::WaitForData if more data is required or the next state. For // structs, lists, maps, or sets the return_state is pushed onto the stack and the next state is @@ -171,6 +177,7 @@ class DecoderStateMachine : public Logger::Loggable { DecoderCallbacks& callbacks_; ProtocolState state_; std::vector stack_; + uint32_t body_start_{}; uint32_t body_bytes_{}; }; diff --git a/source/extensions/filters/network/thrift_proxy/metadata.h b/source/extensions/filters/network/thrift_proxy/metadata.h index 08b91a1c4f040..de44db1948b2d 100644 --- a/source/extensions/filters/network/thrift_proxy/metadata.h +++ b/source/extensions/filters/network/thrift_proxy/metadata.h @@ -53,6 +53,10 @@ class MessageMetadata { copy->setMessageType(messageType()); } + if (hasReplyType()) { + copy->setReplyType(replyType()); + } + Http::HeaderMapImpl::copyFrom(copy->headers(), headers()); copy->mutableSpans().assign(spans().begin(), spans().end()); @@ -115,6 +119,10 @@ class MessageMetadata { MessageType messageType() const { return msg_type_.value(); } void setMessageType(MessageType msg_type) { msg_type_ = msg_type; } + bool hasReplyType() const { return reply_type_.has_value(); } + ReplyType replyType() const { return reply_type_.value(); } + void setReplyType(ReplyType reply_type) { reply_type_ = reply_type; } + /** * @return HeaderMap of current headers (never throws) */ @@ -168,6 +176,7 @@ class MessageMetadata { absl::optional method_name_{}; 
absl::optional seq_id_{}; absl::optional msg_type_{}; + absl::optional reply_type_{}; Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()}; absl::optional app_ex_type_; absl::optional app_ex_msg_; diff --git a/source/extensions/filters/network/thrift_proxy/protocol.h b/source/extensions/filters/network/thrift_proxy/protocol.h index a9eae7779e1ec..13b2586c1b55a 100644 --- a/source/extensions/filters/network/thrift_proxy/protocol.h +++ b/source/extensions/filters/network/thrift_proxy/protocol.h @@ -73,6 +73,16 @@ class Protocol { */ virtual bool readMessageEnd(Buffer::Instance& buffer) PURE; + /** + * Peeks the start of a Thrift protocol reply payload in the buffer and updates the reply + * type parameter with the reply type of the payload. + * @param buffer the buffer to peek from + * @param reply_type ReplyType to set the payload's reply type to success or error + * @return true if reply type was successfully read, false if more data is required + * @throw EnvoyException if the data is not a valid payload + */ + virtual bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) PURE; + /** * Reads the start of a Thrift struct from the buffer and updates the name parameter with the * value from the struct header. If successful, the struct header is removed from the buffer. 
diff --git a/source/extensions/filters/network/thrift_proxy/router/config.cc b/source/extensions/filters/network/thrift_proxy/router/config.cc index 3e651b2886b7f..a6b28cc58c051 100644 --- a/source/extensions/filters/network/thrift_proxy/router/config.cc +++ b/source/extensions/filters/network/thrift_proxy/router/config.cc @@ -20,7 +20,7 @@ ThriftFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoT auto shadow_writer = std::make_shared(context.clusterManager(), stat_prefix, context.scope(), - context.dispatcher(), context.threadLocal()); + context.mainThreadDispatcher(), context.threadLocal()); return [&context, stat_prefix, shadow_writer](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/network/thrift_proxy/router/router.h b/source/extensions/filters/network/thrift_proxy/router/router.h index dfda11ac34407..8806bbd5e4ac5 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router.h +++ b/source/extensions/filters/network/thrift_proxy/router/router.h @@ -357,7 +357,8 @@ class RequestOwner : public ProtocolConverter, public Logger::LoggablehasMessageType() && metadata->messageType() == MessageType::Reply); - return FilterStatus::Continue; - } - FilterStatus messageEnd() override { - if (first_reply_field_) { - success_ = true; - first_reply_field_ = false; - } - return FilterStatus::Continue; - } - FilterStatus fieldBegin(absl::string_view, FieldType&, int16_t& field_id) override { - if (first_reply_field_) { - success_ = (field_id == 0); - first_reply_field_ = false; + if (metadata_->hasReplyType()) { + success_ = metadata_->replyType() == ReplyType::Success; } return FilterStatus::Continue; } @@ -97,7 +84,6 @@ struct NullResponseDecoder : public DecoderCallbacks, public ProtocolConverter { MessageMetadataSharedPtr metadata_; absl::optional success_; bool complete_ : 1; - bool first_reply_field_ : 1; }; using NullResponseDecoderPtr = std::unique_ptr; diff --git 
a/source/extensions/filters/network/thrift_proxy/stats.h b/source/extensions/filters/network/thrift_proxy/stats.h index 7e57db76f803b..150367e9dfe01 100644 --- a/source/extensions/filters/network/thrift_proxy/stats.h +++ b/source/extensions/filters/network/thrift_proxy/stats.h @@ -22,11 +22,13 @@ namespace ThriftProxy { COUNTER(request_decoding_error) \ COUNTER(request_invalid_type) \ COUNTER(request_oneway) \ + COUNTER(request_passthrough) \ COUNTER(response) \ COUNTER(response_decoding_error) \ COUNTER(response_error) \ COUNTER(response_exception) \ COUNTER(response_invalid_type) \ + COUNTER(response_passthrough) \ COUNTER(response_reply) \ COUNTER(response_success) \ GAUGE(request_active, Accumulate) \ diff --git a/source/extensions/filters/network/thrift_proxy/thrift.h b/source/extensions/filters/network/thrift_proxy/thrift.h index 3092bceb11b7e..bfbb312553513 100644 --- a/source/extensions/filters/network/thrift_proxy/thrift.h +++ b/source/extensions/filters/network/thrift_proxy/thrift.h @@ -118,6 +118,14 @@ enum class MessageType { LastMessageType = Oneway, }; +/** + * A Reply message is either a success or an error (IDL exception) + */ +enum class ReplyType { + Success, + Error, +}; + /** * Thrift protocol struct field types. 
* See https://github.com/apache/thrift/blob/master/lib/cpp/src/thrift/protocol/TProtocol.h diff --git a/source/extensions/filters/network/wasm/config.cc b/source/extensions/filters/network/wasm/config.cc index 925e538cb67eb..43045f7a53a16 100644 --- a/source/extensions/filters/network/wasm/config.cc +++ b/source/extensions/filters/network/wasm/config.cc @@ -16,6 +16,8 @@ namespace Wasm { Network::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config, Server::Configuration::FactoryContext& context) { + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); auto filter_config = std::make_shared(proto_config, context); return [filter_config](Network::FilterManager& filter_manager) -> void { auto filter = filter_config->createFilter(); diff --git a/source/extensions/filters/network/wasm/wasm_filter.cc b/source/extensions/filters/network/wasm/wasm_filter.cc index 2ee4ea033a36a..d5f1a6a3ba382 100644 --- a/source/extensions/filters/network/wasm/wasm_filter.cc +++ b/source/extensions/filters/network/wasm/wasm_filter.cc @@ -21,8 +21,8 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::network::wasm::v3:: }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm network filter {}", plugin->name_)); diff --git a/source/extensions/filters/network/zookeeper_proxy/config.cc b/source/extensions/filters/network/zookeeper_proxy/config.cc index 0a980cf96152d..833eb4563b321 100644 --- a/source/extensions/filters/network/zookeeper_proxy/config.cc +++ 
b/source/extensions/filters/network/zookeeper_proxy/config.cc @@ -30,7 +30,7 @@ Network::FilterFactoryCb ZooKeeperConfigFactory::createFilterFactoryFromProtoTyp ZooKeeperFilterConfigSharedPtr filter_config( std::make_shared(stat_prefix, max_packet_bytes, context.scope())); - auto& time_source = context.dispatcher().timeSource(); + auto& time_source = context.mainThreadDispatcher().timeSource(); return [filter_config, &time_source](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared(filter_config, time_source)); diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index 8dc000cc2ea15..dff84e520c8a6 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -42,7 +42,7 @@ envoy_cc_library( "//source/common/protobuf:message_validator_lib", "//source/common/runtime:runtime_lib", "//source/common/upstream:cluster_manager_lib", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], ) @@ -55,6 +55,6 @@ envoy_cc_extension( ":dns_filter_lib", "//envoy/registry", "//envoy/server:filter_config_interface", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/udp/dns_filter/config.cc b/source/extensions/filters/udp/dns_filter/config.cc index 28f00e6a445f2..53745a01f8eba 100644 --- a/source/extensions/filters/udp/dns_filter/config.cc +++ b/source/extensions/filters/udp/dns_filter/config.cc @@ -9,7 +9,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF const Protobuf::Message& config, Server::Configuration::ListenerFactoryContext& context) { auto shared_config = std::make_shared( context, 
MessageUtil::downcastAndValidate< - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig&>( + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig&>( config, context.messageValidationVisitor())); return [shared_config](Network::UdpListenerFilterManager& filter_manager, @@ -19,7 +19,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF } ProtobufTypes::MessagePtr DnsFilterConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique(); } std::string DnsFilterConfigFactory::name() const { return "envoy.filters.udp.dns_filter"; } diff --git a/source/extensions/filters/udp/dns_filter/config.h b/source/extensions/filters/udp/dns_filter/config.h index 9278199a26d68..77723946b8abf 100644 --- a/source/extensions/filters/udp/dns_filter/config.h +++ b/source/extensions/filters/udp/dns_filter/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.validate.h" #include "envoy/server/filter_config.h" #include "source/extensions/filters/udp/dns_filter/dns_filter.h" diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index 1e869093389d2..11f74cfb044f7 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -18,11 +18,11 @@ static constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300}; DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config) + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig& config) 
: root_scope_(context.scope()), cluster_manager_(context.clusterManager()), api_(context.api()), stats_(generateStats(config.stat_prefix(), root_scope_)), resolver_timeout_(DEFAULT_RESOLVER_TIMEOUT), random_(context.api().randomGenerator()) { - using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig; + using envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig; const auto& server_config = config.server_config(); @@ -189,8 +189,8 @@ void DnsFilterEnvoyConfig::addEndpointToSuffix(const absl::string_view suffix, } bool DnsFilterEnvoyConfig::loadServerConfig( - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig:: - ServerContextConfig& config, + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ServerContextConfig& + config, envoy::data::dns::v3::DnsTable& table) { using envoy::data::dns::v3::DnsTable; diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index bdcfae1239fd7..331ab1a8bd2c9 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -1,7 +1,7 @@ #pragma once #include "envoy/event/file_event.h" -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" #include "envoy/network/dns.h" #include "envoy/network/filter.h" @@ -78,7 +78,7 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { public: DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config); + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig& config); DnsFilterStats& stats() const { return stats_; } const absl::flat_hash_map& domainTtl() const { @@ -105,9 +105,10 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { POOL_HISTOGRAM_PREFIX(scope, final_prefix))}; 
} - bool loadServerConfig(const envoy::extensions::filters::udp::dns_filter::v3alpha:: - DnsFilterConfig::ServerContextConfig& config, - envoy::data::dns::v3::DnsTable& table); + bool loadServerConfig( + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ServerContextConfig& + config, + envoy::data::dns::v3::DnsTable& table); void addEndpointToSuffix(const absl::string_view suffix, const absl::string_view domain_name, DnsEndpointConfig& endpoint_config); diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_utils.h b/source/extensions/filters/udp/dns_filter/dns_filter_utils.h index b2e4565c62194..8294930c315db 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter_utils.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter_utils.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" #include "envoy/network/address.h" #include "source/extensions/filters/udp/dns_filter/dns_filter_constants.h" diff --git a/source/extensions/grpc_credentials/file_based_metadata/config.cc b/source/extensions/grpc_credentials/file_based_metadata/config.cc index 81fa4aa783435..aebba3b17150a 100644 --- a/source/extensions/grpc_credentials/file_based_metadata/config.cc +++ b/source/extensions/grpc_credentials/file_based_metadata/config.cc @@ -69,11 +69,12 @@ FileBasedMetadataAuthenticator::GetMetadata(grpc::string_ref, grpc::string_ref, if (!config_.header_key().empty()) { header_key = config_.header_key(); } - TRY_ASSERT_MAIN_THREAD { + // TODO(#14320): avoid using an exception here or find some way of doing this + // in the main thread. 
+ TRY_NEEDS_AUDIT { std::string header_value = Envoy::Config::DataSource::read(config_.secret_data(), true, api_); metadata->insert(std::make_pair(header_key, header_prefix + header_value)); } - END_TRY catch (const EnvoyException& e) { return grpc::Status(grpc::StatusCode::NOT_FOUND, e.what()); } diff --git a/source/extensions/health_checkers/redis/config.cc b/source/extensions/health_checkers/redis/config.cc index 880c2b34de770..06f73368ee7eb 100644 --- a/source/extensions/health_checkers/redis/config.cc +++ b/source/extensions/health_checkers/redis/config.cc @@ -18,8 +18,8 @@ Upstream::HealthCheckerSharedPtr RedisHealthCheckerFactory::createCustomHealthCh Server::Configuration::HealthCheckerFactoryContext& context) { return std::make_shared( context.cluster(), config, - getRedisHealthCheckConfig(config, context.messageValidationVisitor()), context.dispatcher(), - context.runtime(), context.eventLogger(), context.api(), + getRedisHealthCheckConfig(config, context.messageValidationVisitor()), + context.mainThreadDispatcher(), context.runtime(), context.eventLogger(), context.api(), NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_); }; diff --git a/source/extensions/internal_redirect/allow_listed_routes/BUILD b/source/extensions/internal_redirect/allow_listed_routes/BUILD index bb6a1f6091dbd..a149d39d42959 100644 --- a/source/extensions/internal_redirect/allow_listed_routes/BUILD +++ b/source/extensions/internal_redirect/allow_listed_routes/BUILD @@ -23,10 +23,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up by moving the redirect test to extensions. 
- extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ ":allow_listed_routes_lib", "//envoy/registry", diff --git a/source/extensions/internal_redirect/previous_routes/BUILD b/source/extensions/internal_redirect/previous_routes/BUILD index 969d1ac9a13b6..d7f98d55be9dc 100644 --- a/source/extensions/internal_redirect/previous_routes/BUILD +++ b/source/extensions/internal_redirect/previous_routes/BUILD @@ -23,10 +23,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up by moving the redirect test to extensions. - extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ ":previous_routes_lib", "//envoy/registry", diff --git a/source/extensions/internal_redirect/safe_cross_scheme/BUILD b/source/extensions/internal_redirect/safe_cross_scheme/BUILD index bc464c310b148..7afb838b8ebe2 100644 --- a/source/extensions/internal_redirect/safe_cross_scheme/BUILD +++ b/source/extensions/internal_redirect/safe_cross_scheme/BUILD @@ -22,10 +22,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up by moving the redirect test to extensions. 
- extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ ":safe_cross_scheme_lib", "//envoy/registry", diff --git a/source/extensions/key_value/file_based/BUILD b/source/extensions/key_value/file_based/BUILD index 4603b869b5908..5237ea3cdb847 100644 --- a/source/extensions/key_value/file_based/BUILD +++ b/source/extensions/key_value/file_based/BUILD @@ -19,7 +19,7 @@ envoy_cc_extension( "//envoy/filesystem:filesystem_interface", "//envoy/registry", "//source/common/common:key_value_store_lib", - "@envoy_api//envoy/extensions/common/key_value/v3:pkg_cc_proto", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/key_value/file_based/config.cc b/source/extensions/key_value/file_based/config.cc index 6fbd99b77cffc..11568c625c1ac 100644 --- a/source/extensions/key_value/file_based/config.cc +++ b/source/extensions/key_value/file_based/config.cc @@ -44,8 +44,7 @@ KeyValueStorePtr FileBasedKeyValueStoreFactory::createStore( const Protobuf::Message& config, ProtobufMessage::ValidationVisitor& validation_visitor, Event::Dispatcher& dispatcher, Filesystem::Instance& file_system) { const auto& typed_config = MessageUtil::downcastAndValidate< - const envoy::extensions::common::key_value::v3::KeyValueStoreConfig&>(config, - validation_visitor); + const envoy::config::common::key_value::v3::KeyValueStoreConfig&>(config, validation_visitor); const auto file_config = MessageUtil::anyConvertAndValidate< envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig>( typed_config.config().typed_config(), validation_visitor); diff --git a/source/extensions/key_value/file_based/config.h b/source/extensions/key_value/file_based/config.h index 414b7d7473185..4b1cccec616d1 100644 --- a/source/extensions/key_value/file_based/config.h +++ b/source/extensions/key_value/file_based/config.h @@ -1,6 +1,6 @@ #include 
"envoy/common/key_value_store.h" -#include "envoy/extensions/common/key_value/v3/config.pb.h" -#include "envoy/extensions/common/key_value/v3/config.pb.validate.h" +#include "envoy/config/common/key_value/v3/config.pb.h" +#include "envoy/config/common/key_value/v3/config.pb.validate.h" #include "envoy/extensions/key_value/file_based/v3/config.pb.h" #include "envoy/extensions/key_value/file_based/v3/config.pb.validate.h" diff --git a/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc b/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc index 6aa0aeb76858d..d6797ac85ec2f 100644 --- a/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc +++ b/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc @@ -16,7 +16,7 @@ InjectedResourceMonitor::InjectedResourceMonitor( config, Server::Configuration::ResourceMonitorFactoryContext& context) : filename_(config.filename()), file_changed_(true), - watcher_(context.dispatcher().createFilesystemWatcher()), api_(context.api()) { + watcher_(context.mainThreadDispatcher().createFilesystemWatcher()), api_(context.api()) { watcher_->addWatch(filename_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t) { onFileChanged(); }); } diff --git a/source/extensions/stat_sinks/statsd/BUILD b/source/extensions/stat_sinks/statsd/BUILD index 1443316f71519..50be7094693a5 100644 --- a/source/extensions/stat_sinks/statsd/BUILD +++ b/source/extensions/stat_sinks/statsd/BUILD @@ -14,7 +14,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # Legacy test use. TODO(#9953) clean up. 
deps = [ "//envoy/registry", "//source/common/network:address_lib", diff --git a/source/extensions/stat_sinks/wasm/config.cc b/source/extensions/stat_sinks/wasm/config.cc index 1eec57ece7efa..be72fb3d18610 100644 --- a/source/extensions/stat_sinks/wasm/config.cc +++ b/source/extensions/stat_sinks/wasm/config.cc @@ -36,18 +36,20 @@ WasmSinkFactory::createStatsSink(const Protobuf::Message& proto_config, } return; } - wasm_sink->setSingleton( - Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, context.dispatcher())); + wasm_sink->setSingleton(Common::Wasm::getOrCreateThreadLocalPlugin( + base_wasm, plugin, context.mainThreadDispatcher())); }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm Stat Sink {}", plugin->name_)); } + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); return wasm_sink; } diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index b3044a98df675..e13753344d6a4 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -52,6 +52,7 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_lib", "//source/common/tracing:common_values_lib", + "//source/common/tracing:http_tracer_lib", "//source/common/tracing:null_span_lib", ], ) diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index 76792fb4cc16d..418b3766748fe 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -10,6 +10,7 @@ #include 
"source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/protobuf/utility.h" +#include "source/common/tracing/http_tracer_impl.h" #include "source/extensions/tracers/xray/daemon.pb.validate.h" namespace Envoy { @@ -18,7 +19,8 @@ namespace Tracers { namespace XRay { namespace { -constexpr auto XRaySerializationVersion = "1"; +constexpr absl::string_view XRaySerializationVersion = "1"; +constexpr absl::string_view DirectionKey = "direction"; // X-Ray Trace ID Format // @@ -35,19 +37,14 @@ constexpr auto XRaySerializationVersion = "1"; std::string generateTraceId(SystemTime point_in_time, Random::RandomGenerator& random) { using std::chrono::seconds; using std::chrono::time_point_cast; - const auto epoch = time_point_cast(point_in_time).time_since_epoch().count(); - std::string out; - out.reserve(35); - out += XRaySerializationVersion; - out.push_back('-'); // epoch in seconds represented as 8 hexadecimal characters - out += Hex::uint32ToHex(epoch); - out.push_back('-'); + const auto epoch = time_point_cast(point_in_time).time_since_epoch().count(); std::string uuid = random.uuid(); // unique id represented as 24 hexadecimal digits and no dashes uuid.erase(std::remove(uuid.begin(), uuid.end(), '-'), uuid.end()); ASSERT(uuid.length() >= 24); - out += uuid.substr(0, 24); + const std::string out = + absl::StrCat(XRaySerializationVersion, "-", Hex::uint32ToHex(epoch), "-", uuid.substr(0, 24)); return out; } @@ -93,6 +90,8 @@ void Span::finishSpan() { for (const auto& item : custom_annotations_) { s.mutable_annotations()->insert({item.first, item.second}); } + // `direction` will be either "ingress" or "egress" + s.mutable_annotations()->insert({std::string(DirectionKey), direction()}); const std::string json = MessageUtil::getJsonStringFromMessageOrDie( s, false /* pretty_print */, false /* always_print_primitive_fields */); @@ -106,11 +105,12 @@ void Span::injectContext(Tracing::TraceContext& trace_context) { 
trace_context.setByReferenceKey(XRayTraceHeader, xray_header_value); } -Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& operation_name, +Tracing::SpanPtr Span::spawnChild(const Tracing::Config& config, const std::string& operation_name, Envoy::SystemTime start_time) { auto child_span = std::make_unique(time_source_, random_, broker_); child_span->setName(name()); child_span->setOperation(operation_name); + child_span->setDirection(Tracing::HttpTracerUtility::toString(config.operationName())); child_span->setStartTime(start_time); child_span->setParentId(id()); child_span->setTraceId(traceId()); @@ -118,12 +118,14 @@ Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& ope return child_span; } -Tracing::SpanPtr Tracer::startSpan(const std::string& operation_name, Envoy::SystemTime start_time, +Tracing::SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& operation_name, + Envoy::SystemTime start_time, const absl::optional& xray_header) { auto span_ptr = std::make_unique(time_source_, random_, *daemon_broker_); span_ptr->setName(segment_name_); span_ptr->setOperation(operation_name); + span_ptr->setDirection(Tracing::HttpTracerUtility::toString(config.operationName())); // Even though we have a TimeSource member in the tracer, we assume the start_time argument has a // more precise value than calling the systemTime() at this point in time. span_ptr->setStartTime(start_time); diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index 623eddd88638a..f53c4ca91a126 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -64,6 +64,12 @@ class Span : public Tracing::Span, Logger::Loggable { operation_name_ = std::string(operation); } + /** + * Sets the current direction on the Span. + * This information will be included in the X-Ray span's annotation. 
+ */ + void setDirection(absl::string_view direction) { direction_ = std::string(direction); } + /** * Sets the name of the Span. */ @@ -140,8 +146,16 @@ class Span : public Tracing::Span, Logger::Loggable { */ const std::string& id() const { return id_; } + /** + * Gets this Span's parent ID. + */ const std::string& parentId() const { return parent_segment_id_; } + /** + * Gets this Span's direction. + */ + const std::string& direction() const { return direction_; } + /** * Gets this Span's name. */ @@ -196,6 +210,7 @@ class Span : public Tracing::Span, Logger::Loggable { DaemonBroker& broker_; Envoy::SystemTime start_time_; std::string operation_name_; + std::string direction_; std::string id_; std::string trace_id_; std::string parent_segment_id_; @@ -222,7 +237,8 @@ class Tracer { /** * Starts a tracing span for X-Ray */ - Tracing::SpanPtr startSpan(const std::string& operation_name, Envoy::SystemTime start_time, + Tracing::SpanPtr startSpan(const Tracing::Config&, const std::string& operation_name, + Envoy::SystemTime start_time, const absl::optional& xray_header); /** * Creates a Span that is marked as not-sampled. diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index 34d260d15668a..29e377229baa7 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -74,7 +74,6 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, // If we have a XRay TraceID in the headers, then we create a SpanContext to pass that trace-id // around if no TraceID (which means no x-ray header) then this is a brand new span. 
- UNREFERENCED_PARAMETER(config); // TODO(marcomagdy) - how do we factor this into the logic above UNREFERENCED_PARAMETER(tracing_decision); const auto header = trace_context.getByKey(XRayTraceHeader); @@ -106,7 +105,7 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto* tracer = tls_slot_ptr_->getTyped().tracer_.get(); if (should_trace.value()) { - return tracer->startSpan(operation_name, start_time, + return tracer->startSpan(config, operation_name, start_time, header.has_value() ? absl::optional(xray_header) : absl::nullopt); } diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index e7ee5ee6b0c75..f164514bead13 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -67,10 +67,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # Legacy test use. TODO(#9953) clean up. - extra_visibility = [ - "//test/server:__subpackages__", - ], deps = [ ":zipkin_lib", "//source/extensions/tracers/common:factory_base_lib", diff --git a/source/extensions/tracers/zipkin/span_buffer.cc b/source/extensions/tracers/zipkin/span_buffer.cc index 603a9129c0def..5c07f76abe397 100644 --- a/source/extensions/tracers/zipkin/span_buffer.cc +++ b/source/extensions/tracers/zipkin/span_buffer.cc @@ -48,7 +48,7 @@ SerializerPtr SpanBuffer::makeSerializer( const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version, const bool shared_span_context) { switch (version) { - case envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1: + case envoy::config::trace::v3::ZipkinConfig::DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE: throw EnvoyException( "hidden_envoy_deprecated_HTTP_JSON_V1 has been deprecated. 
Please use a non-default " "envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion value."); diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD index 47437466ea2c4..03bd2a581454a 100644 --- a/source/extensions/transport_sockets/tap/BUILD +++ b/source/extensions/transport_sockets/tap/BUILD @@ -51,11 +51,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up. - extra_visibility = [ - "//test/common/access_log:__subpackages__", - "//test/extensions/transport_sockets/tls/integration:__subpackages__", - ], deps = [ ":tap_config_impl", ":tap_lib", diff --git a/source/extensions/transport_sockets/tap/config.cc b/source/extensions/transport_sockets/tap/config.cc index f389c8633c22d..26255d00151d4 100644 --- a/source/extensions/transport_sockets/tap/config.cc +++ b/source/extensions/transport_sockets/tap/config.cc @@ -44,9 +44,10 @@ Network::TransportSocketFactoryPtr UpstreamTapSocketConfigFactory::createTranspo auto inner_transport_factory = inner_config_factory.createTransportSocketFactory(*inner_factory_config, context); return std::make_unique( - outer_config, std::make_unique(context.dispatcher().timeSource()), - context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(), - std::move(inner_transport_factory)); + outer_config, + std::make_unique(context.mainThreadDispatcher().timeSource()), + context.admin(), context.singletonManager(), context.threadLocal(), + context.mainThreadDispatcher(), std::move(inner_transport_factory)); } Network::TransportSocketFactoryPtr DownstreamTapSocketConfigFactory::createTransportSocketFactory( @@ -63,9 +64,10 @@ Network::TransportSocketFactoryPtr DownstreamTapSocketConfigFactory::createTrans auto inner_transport_factory = inner_config_factory.createTransportSocketFactory( *inner_factory_config, context, server_names); return std::make_unique( - outer_config, 
std::make_unique(context.dispatcher().timeSource()), - context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(), - std::move(inner_transport_factory)); + outer_config, + std::make_unique(context.mainThreadDispatcher().timeSource()), + context.admin(), context.singletonManager(), context.threadLocal(), + context.mainThreadDispatcher(), std::move(inner_transport_factory)); } ProtobufTypes::MessagePtr TapSocketConfigFactory::createEmptyConfigProto() { diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 91deb5c164bf1..137768a7cf000 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -26,12 +26,25 @@ envoy_cc_extension( ], ) +envoy_cc_library( + name = "connection_info_impl_base_lib", + srcs = ["connection_info_impl_base.cc"], + hdrs = ["connection_info_impl_base.h"], + external_deps = ["ssl"], + deps = [ + ":context_lib", + ":utility_lib", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], +) + envoy_cc_library( name = "ssl_handshaker_lib", srcs = ["ssl_handshaker.cc"], hdrs = ["ssl_handshaker.h"], - external_deps = ["ssl"], deps = [ + ":connection_info_impl_base_lib", ":context_lib", ":utility_lib", "//envoy/network:connection_interface", diff --git a/source/extensions/transport_sockets/tls/cert_validator/BUILD b/source/extensions/transport_sockets/tls/cert_validator/BUILD index 6fcffd4d9e5f9..ce92df41e80c3 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/BUILD +++ b/source/extensions/transport_sockets/tls/cert_validator/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/common:base64_lib", "//source/common/common:hex_lib", + "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/stats:symbol_table_lib", "//source/common/stats:utility_lib", diff --git 
a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc index 6cd624f2e38fa..369c2aa7375b5 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc +++ b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc @@ -195,6 +195,7 @@ int DefaultCertValidator::doVerifyCertChain( if (ret <= 0) { stats_.fail_verify_error_.inc(); + ENVOY_LOG(debug, "{}", Utility::getX509VerificationErrorInfo(store_ctx)); return allow_untrusted_certificate_ ? 1 : ret; } } diff --git a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h index 4d5daaf0205e0..163eb0118d427 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h +++ b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h @@ -14,6 +14,7 @@ #include "envoy/ssl/private_key/private_key.h" #include "envoy/ssl/ssl_socket_extended_info.h" +#include "source/common/common/logger.h" #include "source/common/common/matchers.h" #include "source/common/stats/symbol_table_impl.h" #include "source/extensions/transport_sockets/tls/cert_validator/cert_validator.h" @@ -28,7 +29,7 @@ namespace Extensions { namespace TransportSockets { namespace Tls { -class DefaultCertValidator : public CertValidator { +class DefaultCertValidator : public CertValidator, Logger::Loggable { public: DefaultCertValidator(const Envoy::Ssl::CertificateValidationContextConfig* config, SslStats& stats, TimeSource& time_source); diff --git a/source/extensions/transport_sockets/tls/connection_info_impl_base.cc b/source/extensions/transport_sockets/tls/connection_info_impl_base.cc new file mode 100644 index 0000000000000..de692e42fff43 --- /dev/null +++ b/source/extensions/transport_sockets/tls/connection_info_impl_base.cc @@ -0,0 +1,280 @@ +#include 
"source/extensions/transport_sockets/tls/connection_info_impl_base.h" + +#include "source/common/common/hex.h" + +#include "absl/strings/str_replace.h" +#include "openssl/err.h" +#include "openssl/x509v3.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace Tls { + +bool ConnectionInfoImplBase::peerCertificatePresented() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + return cert != nullptr; +} + +absl::Span ConnectionInfoImplBase::uriSanLocalCertificate() const { + if (!cached_uri_san_local_certificate_.empty()) { + return cached_uri_san_local_certificate_; + } + + // The cert object is not owned. + X509* cert = SSL_get_certificate(ssl()); + if (!cert) { + ASSERT(cached_uri_san_local_certificate_.empty()); + return cached_uri_san_local_certificate_; + } + cached_uri_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); + return cached_uri_san_local_certificate_; +} + +absl::Span ConnectionInfoImplBase::dnsSansLocalCertificate() const { + if (!cached_dns_san_local_certificate_.empty()) { + return cached_dns_san_local_certificate_; + } + + X509* cert = SSL_get_certificate(ssl()); + if (!cert) { + ASSERT(cached_dns_san_local_certificate_.empty()); + return cached_dns_san_local_certificate_; + } + cached_dns_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); + return cached_dns_san_local_certificate_; +} + +const std::string& ConnectionInfoImplBase::sha256PeerCertificateDigest() const { + if (!cached_sha_256_peer_certificate_digest_.empty()) { + return cached_sha_256_peer_certificate_digest_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_sha_256_peer_certificate_digest_.empty()); + return cached_sha_256_peer_certificate_digest_; + } + + std::vector computed_hash(SHA256_DIGEST_LENGTH); + unsigned int n; + X509_digest(cert.get(), EVP_sha256(), computed_hash.data(), &n); + RELEASE_ASSERT(n == computed_hash.size(), ""); + 
cached_sha_256_peer_certificate_digest_ = Hex::encode(computed_hash); + return cached_sha_256_peer_certificate_digest_; +} + +const std::string& ConnectionInfoImplBase::sha1PeerCertificateDigest() const { + if (!cached_sha_1_peer_certificate_digest_.empty()) { + return cached_sha_1_peer_certificate_digest_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_sha_1_peer_certificate_digest_.empty()); + return cached_sha_1_peer_certificate_digest_; + } + + std::vector computed_hash(SHA_DIGEST_LENGTH); + unsigned int n; + X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n); + RELEASE_ASSERT(n == computed_hash.size(), ""); + cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash); + return cached_sha_1_peer_certificate_digest_; +} + +const std::string& ConnectionInfoImplBase::urlEncodedPemEncodedPeerCertificate() const { + if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { + return cached_url_encoded_pem_encoded_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty()); + return cached_url_encoded_pem_encoded_peer_certificate_; + } + + bssl::UniquePtr buf(BIO_new(BIO_s_mem())); + RELEASE_ASSERT(buf != nullptr, ""); + RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert.get()) == 1, ""); + const uint8_t* output; + size_t length; + RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); + absl::string_view pem(reinterpret_cast(output), length); + cached_url_encoded_pem_encoded_peer_certificate_ = absl::StrReplaceAll( + pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}}); + return cached_url_encoded_pem_encoded_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::urlEncodedPemEncodedPeerCertificateChain() const { + if (!cached_url_encoded_pem_encoded_peer_cert_chain_.empty()) { + return cached_url_encoded_pem_encoded_peer_cert_chain_; + } + + 
STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl()); + if (cert_chain == nullptr) { + ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); + return cached_url_encoded_pem_encoded_peer_cert_chain_; + } + + for (uint64_t i = 0; i < sk_X509_num(cert_chain); i++) { + X509* cert = sk_X509_value(cert_chain, i); + + bssl::UniquePtr buf(BIO_new(BIO_s_mem())); + RELEASE_ASSERT(buf != nullptr, ""); + RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert) == 1, ""); + const uint8_t* output; + size_t length; + RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); + + absl::string_view pem(reinterpret_cast(output), length); + cached_url_encoded_pem_encoded_peer_cert_chain_ = absl::StrCat( + cached_url_encoded_pem_encoded_peer_cert_chain_, + absl::StrReplaceAll( + pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}})); + } + return cached_url_encoded_pem_encoded_peer_cert_chain_; +} + +absl::Span ConnectionInfoImplBase::uriSanPeerCertificate() const { + if (!cached_uri_san_peer_certificate_.empty()) { + return cached_uri_san_peer_certificate_; + } + + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_uri_san_peer_certificate_.empty()); + return cached_uri_san_peer_certificate_; + } + cached_uri_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); + return cached_uri_san_peer_certificate_; +} + +absl::Span ConnectionInfoImplBase::dnsSansPeerCertificate() const { + if (!cached_dns_san_peer_certificate_.empty()) { + return cached_dns_san_peer_certificate_; + } + + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_dns_san_peer_certificate_.empty()); + return cached_dns_san_peer_certificate_; + } + cached_dns_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); + return cached_dns_san_peer_certificate_; +} + +uint16_t ConnectionInfoImplBase::ciphersuiteId() const { + const SSL_CIPHER* cipher = 
SSL_get_current_cipher(ssl()); + if (cipher == nullptr) { + return 0xffff; + } + + // From the OpenSSL docs: + // SSL_CIPHER_get_id returns |cipher|'s id. It may be cast to a |uint16_t| to + // get the cipher suite value. + return static_cast(SSL_CIPHER_get_id(cipher)); +} + +std::string ConnectionInfoImplBase::ciphersuiteString() const { + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); + if (cipher == nullptr) { + return {}; + } + + return SSL_CIPHER_get_name(cipher); +} + +const std::string& ConnectionInfoImplBase::tlsVersion() const { + if (!cached_tls_version_.empty()) { + return cached_tls_version_; + } + cached_tls_version_ = SSL_get_version(ssl()); + return cached_tls_version_; +} + +const std::string& ConnectionInfoImplBase::serialNumberPeerCertificate() const { + if (!cached_serial_number_peer_certificate_.empty()) { + return cached_serial_number_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_serial_number_peer_certificate_.empty()); + return cached_serial_number_peer_certificate_; + } + cached_serial_number_peer_certificate_ = Utility::getSerialNumberFromCertificate(*cert.get()); + return cached_serial_number_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::issuerPeerCertificate() const { + if (!cached_issuer_peer_certificate_.empty()) { + return cached_issuer_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_issuer_peer_certificate_.empty()); + return cached_issuer_peer_certificate_; + } + cached_issuer_peer_certificate_ = Utility::getIssuerFromCertificate(*cert); + return cached_issuer_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::subjectPeerCertificate() const { + if (!cached_subject_peer_certificate_.empty()) { + return cached_subject_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + 
ASSERT(cached_subject_peer_certificate_.empty()); + return cached_subject_peer_certificate_; + } + cached_subject_peer_certificate_ = Utility::getSubjectFromCertificate(*cert); + return cached_subject_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::subjectLocalCertificate() const { + if (!cached_subject_local_certificate_.empty()) { + return cached_subject_local_certificate_; + } + X509* cert = SSL_get_certificate(ssl()); + if (!cert) { + ASSERT(cached_subject_local_certificate_.empty()); + return cached_subject_local_certificate_; + } + cached_subject_local_certificate_ = Utility::getSubjectFromCertificate(*cert); + return cached_subject_local_certificate_; +} + +absl::optional ConnectionInfoImplBase::validFromPeerCertificate() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + return absl::nullopt; + } + return Utility::getValidFrom(*cert); +} + +absl::optional ConnectionInfoImplBase::expirationPeerCertificate() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + return absl::nullopt; + } + return Utility::getExpirationTime(*cert); +} + +const std::string& ConnectionInfoImplBase::sessionId() const { + if (!cached_session_id_.empty()) { + return cached_session_id_; + } + SSL_SESSION* session = SSL_get_session(ssl()); + if (session == nullptr) { + ASSERT(cached_session_id_.empty()); + return cached_session_id_; + } + + unsigned int session_id_length = 0; + const uint8_t* session_id = SSL_SESSION_get_id(session, &session_id_length); + cached_session_id_ = Hex::encode(session_id, session_id_length); + return cached_session_id_; +} + +} // namespace Tls +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/tls/connection_info_impl_base.h b/source/extensions/transport_sockets/tls/connection_info_impl_base.h new file mode 100644 index 0000000000000..e051bccd735af --- /dev/null +++ 
b/source/extensions/transport_sockets/tls/connection_info_impl_base.h @@ -0,0 +1,65 @@ +#pragma once + +#include + +#include "envoy/ssl/connection.h" + +#include "source/common/common/logger.h" +#include "source/extensions/transport_sockets/tls/utility.h" + +#include "absl/types/optional.h" +#include "openssl/ssl.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace Tls { + +// An implementation wraps struct SSL in BoringSSL. +class ConnectionInfoImplBase : public Ssl::ConnectionInfo { +public: + // Ssl::ConnectionInfo + bool peerCertificatePresented() const override; + absl::Span uriSanLocalCertificate() const override; + const std::string& sha256PeerCertificateDigest() const override; + const std::string& sha1PeerCertificateDigest() const override; + const std::string& serialNumberPeerCertificate() const override; + const std::string& issuerPeerCertificate() const override; + const std::string& subjectPeerCertificate() const override; + const std::string& subjectLocalCertificate() const override; + absl::Span uriSanPeerCertificate() const override; + const std::string& urlEncodedPemEncodedPeerCertificate() const override; + const std::string& urlEncodedPemEncodedPeerCertificateChain() const override; + absl::Span dnsSansPeerCertificate() const override; + absl::Span dnsSansLocalCertificate() const override; + absl::optional validFromPeerCertificate() const override; + absl::optional expirationPeerCertificate() const override; + const std::string& sessionId() const override; + uint16_t ciphersuiteId() const override; + std::string ciphersuiteString() const override; + const std::string& tlsVersion() const override; + + virtual SSL* ssl() const PURE; + +protected: + bssl::UniquePtr ssl_; + mutable std::vector cached_uri_san_local_certificate_; + mutable std::string cached_sha_256_peer_certificate_digest_; + mutable std::string cached_sha_1_peer_certificate_digest_; + mutable std::string cached_serial_number_peer_certificate_; + 
mutable std::string cached_issuer_peer_certificate_; + mutable std::string cached_subject_peer_certificate_; + mutable std::string cached_subject_local_certificate_; + mutable std::vector cached_uri_san_peer_certificate_; + mutable std::string cached_url_encoded_pem_encoded_peer_certificate_; + mutable std::string cached_url_encoded_pem_encoded_peer_cert_chain_; + mutable std::vector cached_dns_san_peer_certificate_; + mutable std::vector cached_dns_san_local_certificate_; + mutable std::string cached_session_id_; + mutable std::string cached_tls_version_; +}; + +} // namespace Tls +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index f8870d4dddc88..0afc83d5a67ae 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -1171,14 +1171,19 @@ bool ContextImpl::verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediate error_details = "Failed to verify certificate chain: X509_STORE_CTX_init"; return false; } + // Currently this method is only used to verify server certs, so hard-code "ssl_server" for now. + if (!X509_STORE_CTX_set_default(ctx.get(), "ssl_server") || + !X509_VERIFY_PARAM_set1(X509_STORE_CTX_get0_param(ctx.get()), + SSL_CTX_get0_param(const_cast(ssl_ctx)))) { + error_details = + "Failed to verify certificate chain: fail to setup X509_STORE_CTX or its param."; + return false; + } int res = cert_validator_->doVerifyCertChain(ctx.get(), nullptr, leaf_cert, nullptr); // If |SSL_VERIFY_NONE|, the error is non-fatal, but we keep the error details. 
if (res <= 0 && SSL_CTX_get_verify_mode(ssl_ctx) != SSL_VERIFY_NONE) { - const int n = X509_STORE_CTX_get_error(ctx.get()); - const int depth = X509_STORE_CTX_get_error_depth(ctx.get()); - error_details = absl::StrCat("X509_verify_cert: certificate verification error at depth ", - depth, ": ", X509_verify_cert_error_string(n)); + error_details = Utility::getX509VerificationErrorInfo(ctx.get()); return false; } return true; diff --git a/source/extensions/transport_sockets/tls/private_key/BUILD b/source/extensions/transport_sockets/tls/private_key/BUILD index 3d9a42a78c76d..a7bae7c825a0f 100644 --- a/source/extensions/transport_sockets/tls/private_key/BUILD +++ b/source/extensions/transport_sockets/tls/private_key/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( hdrs = [ "private_key_manager_impl.h", ], + visibility = ["//visibility:public"], deps = [ "//envoy/event:dispatcher_interface", "//envoy/registry", diff --git a/source/extensions/transport_sockets/tls/ssl_handshaker.cc b/source/extensions/transport_sockets/tls/ssl_handshaker.cc index 5361d7b42bc4e..714899cf5f478 100644 --- a/source/extensions/transport_sockets/tls/ssl_handshaker.cc +++ b/source/extensions/transport_sockets/tls/ssl_handshaker.cc @@ -4,14 +4,9 @@ #include "source/common/common/assert.h" #include "source/common/common/empty_string.h" -#include "source/common/common/hex.h" #include "source/common/http/headers.h" #include "source/extensions/transport_sockets/tls/utility.h" -#include "absl/strings/str_replace.h" -#include "openssl/err.h" -#include "openssl/x509v3.h" - using Envoy::Network::PostIoAction; namespace Envoy { @@ -35,190 +30,11 @@ SslHandshakerImpl::SslHandshakerImpl(bssl::UniquePtr ssl, int ssl_extended_ SSL_set_ex_data(ssl_.get(), ssl_extended_socket_info_index, &(this->extended_socket_info_)); } -bool SslHandshakerImpl::peerCertificatePresented() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - return cert != nullptr; -} - bool SslHandshakerImpl::peerCertificateValidated() 
const { return extended_socket_info_.certificateValidationStatus() == Envoy::Ssl::ClientValidationStatus::Validated; } -absl::Span SslHandshakerImpl::uriSanLocalCertificate() const { - if (!cached_uri_san_local_certificate_.empty()) { - return cached_uri_san_local_certificate_; - } - - // The cert object is not owned. - X509* cert = SSL_get_certificate(ssl()); - if (!cert) { - ASSERT(cached_uri_san_local_certificate_.empty()); - return cached_uri_san_local_certificate_; - } - cached_uri_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); - return cached_uri_san_local_certificate_; -} - -absl::Span SslHandshakerImpl::dnsSansLocalCertificate() const { - if (!cached_dns_san_local_certificate_.empty()) { - return cached_dns_san_local_certificate_; - } - - X509* cert = SSL_get_certificate(ssl()); - if (!cert) { - ASSERT(cached_dns_san_local_certificate_.empty()); - return cached_dns_san_local_certificate_; - } - cached_dns_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); - return cached_dns_san_local_certificate_; -} - -const std::string& SslHandshakerImpl::sha256PeerCertificateDigest() const { - if (!cached_sha_256_peer_certificate_digest_.empty()) { - return cached_sha_256_peer_certificate_digest_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_sha_256_peer_certificate_digest_.empty()); - return cached_sha_256_peer_certificate_digest_; - } - - std::vector computed_hash(SHA256_DIGEST_LENGTH); - unsigned int n; - X509_digest(cert.get(), EVP_sha256(), computed_hash.data(), &n); - RELEASE_ASSERT(n == computed_hash.size(), ""); - cached_sha_256_peer_certificate_digest_ = Hex::encode(computed_hash); - return cached_sha_256_peer_certificate_digest_; -} - -const std::string& SslHandshakerImpl::sha1PeerCertificateDigest() const { - if (!cached_sha_1_peer_certificate_digest_.empty()) { - return cached_sha_1_peer_certificate_digest_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - 
if (!cert) { - ASSERT(cached_sha_1_peer_certificate_digest_.empty()); - return cached_sha_1_peer_certificate_digest_; - } - - std::vector computed_hash(SHA_DIGEST_LENGTH); - unsigned int n; - X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n); - RELEASE_ASSERT(n == computed_hash.size(), ""); - cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash); - return cached_sha_1_peer_certificate_digest_; -} - -const std::string& SslHandshakerImpl::urlEncodedPemEncodedPeerCertificate() const { - if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { - return cached_url_encoded_pem_encoded_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty()); - return cached_url_encoded_pem_encoded_peer_certificate_; - } - - bssl::UniquePtr buf(BIO_new(BIO_s_mem())); - RELEASE_ASSERT(buf != nullptr, ""); - RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert.get()) == 1, ""); - const uint8_t* output; - size_t length; - RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); - absl::string_view pem(reinterpret_cast(output), length); - cached_url_encoded_pem_encoded_peer_certificate_ = absl::StrReplaceAll( - pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}}); - return cached_url_encoded_pem_encoded_peer_certificate_; -} - -const std::string& SslHandshakerImpl::urlEncodedPemEncodedPeerCertificateChain() const { - if (!cached_url_encoded_pem_encoded_peer_cert_chain_.empty()) { - return cached_url_encoded_pem_encoded_peer_cert_chain_; - } - - STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl()); - if (cert_chain == nullptr) { - ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); - return cached_url_encoded_pem_encoded_peer_cert_chain_; - } - - for (uint64_t i = 0; i < sk_X509_num(cert_chain); i++) { - X509* cert = sk_X509_value(cert_chain, i); - - bssl::UniquePtr buf(BIO_new(BIO_s_mem())); - 
RELEASE_ASSERT(buf != nullptr, ""); - RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert) == 1, ""); - const uint8_t* output; - size_t length; - RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); - - absl::string_view pem(reinterpret_cast(output), length); - cached_url_encoded_pem_encoded_peer_cert_chain_ = absl::StrCat( - cached_url_encoded_pem_encoded_peer_cert_chain_, - absl::StrReplaceAll( - pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}})); - } - return cached_url_encoded_pem_encoded_peer_cert_chain_; -} - -absl::Span SslHandshakerImpl::uriSanPeerCertificate() const { - if (!cached_uri_san_peer_certificate_.empty()) { - return cached_uri_san_peer_certificate_; - } - - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_uri_san_peer_certificate_.empty()); - return cached_uri_san_peer_certificate_; - } - cached_uri_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); - return cached_uri_san_peer_certificate_; -} - -absl::Span SslHandshakerImpl::dnsSansPeerCertificate() const { - if (!cached_dns_san_peer_certificate_.empty()) { - return cached_dns_san_peer_certificate_; - } - - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_dns_san_peer_certificate_.empty()); - return cached_dns_san_peer_certificate_; - } - cached_dns_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); - return cached_dns_san_peer_certificate_; -} - -uint16_t SslHandshakerImpl::ciphersuiteId() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); - if (cipher == nullptr) { - return 0xffff; - } - - // From the OpenSSL docs: - // SSL_CIPHER_get_id returns |cipher|'s id. It may be cast to a |uint16_t| to - // get the cipher suite value. 
- return static_cast(SSL_CIPHER_get_id(cipher)); -} - -std::string SslHandshakerImpl::ciphersuiteString() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); - if (cipher == nullptr) { - return {}; - } - - return SSL_CIPHER_get_name(cipher); -} - -const std::string& SslHandshakerImpl::tlsVersion() const { - if (!cached_tls_version_.empty()) { - return cached_tls_version_; - } - cached_tls_version_ = SSL_get_version(ssl()); - return cached_tls_version_; -} - Network::PostIoAction SslHandshakerImpl::doHandshake() { ASSERT(state_ != Ssl::SocketState::HandshakeComplete && state_ != Ssl::SocketState::ShutdownSent); int rc = SSL_do_handshake(ssl()); @@ -248,90 +64,6 @@ Network::PostIoAction SslHandshakerImpl::doHandshake() { } } -const std::string& SslHandshakerImpl::serialNumberPeerCertificate() const { - if (!cached_serial_number_peer_certificate_.empty()) { - return cached_serial_number_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_serial_number_peer_certificate_.empty()); - return cached_serial_number_peer_certificate_; - } - cached_serial_number_peer_certificate_ = Utility::getSerialNumberFromCertificate(*cert.get()); - return cached_serial_number_peer_certificate_; -} - -const std::string& SslHandshakerImpl::issuerPeerCertificate() const { - if (!cached_issuer_peer_certificate_.empty()) { - return cached_issuer_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_issuer_peer_certificate_.empty()); - return cached_issuer_peer_certificate_; - } - cached_issuer_peer_certificate_ = Utility::getIssuerFromCertificate(*cert); - return cached_issuer_peer_certificate_; -} - -const std::string& SslHandshakerImpl::subjectPeerCertificate() const { - if (!cached_subject_peer_certificate_.empty()) { - return cached_subject_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - 
ASSERT(cached_subject_peer_certificate_.empty()); - return cached_subject_peer_certificate_; - } - cached_subject_peer_certificate_ = Utility::getSubjectFromCertificate(*cert); - return cached_subject_peer_certificate_; -} - -const std::string& SslHandshakerImpl::subjectLocalCertificate() const { - if (!cached_subject_local_certificate_.empty()) { - return cached_subject_local_certificate_; - } - X509* cert = SSL_get_certificate(ssl()); - if (!cert) { - ASSERT(cached_subject_local_certificate_.empty()); - return cached_subject_local_certificate_; - } - cached_subject_local_certificate_ = Utility::getSubjectFromCertificate(*cert); - return cached_subject_local_certificate_; -} - -absl::optional SslHandshakerImpl::validFromPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - return absl::nullopt; - } - return Utility::getValidFrom(*cert); -} - -absl::optional SslHandshakerImpl::expirationPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - return absl::nullopt; - } - return Utility::getExpirationTime(*cert); -} - -const std::string& SslHandshakerImpl::sessionId() const { - if (!cached_session_id_.empty()) { - return cached_session_id_; - } - SSL_SESSION* session = SSL_get_session(ssl()); - if (session == nullptr) { - ASSERT(cached_session_id_.empty()); - return cached_session_id_; - } - - unsigned int session_id_length = 0; - const uint8_t* session_id = SSL_SESSION_get_id(session, &session_id_length); - cached_session_id_ = Hex::encode(session_id, session_id_length); - return cached_session_id_; -} - } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/ssl_handshaker.h b/source/extensions/transport_sockets/tls/ssl_handshaker.h index 81577c8181744..2ca882fe3ac19 100644 --- a/source/extensions/transport_sockets/tls/ssl_handshaker.h +++ b/source/extensions/transport_sockets/tls/ssl_handshaker.h @@ -1,7 
+1,6 @@ #pragma once #include -#include #include "envoy/network/connection.h" #include "envoy/network/transport_socket.h" @@ -14,6 +13,7 @@ #include "envoy/stats/stats_macros.h" #include "source/common/common/logger.h" +#include "source/extensions/transport_sockets/tls/connection_info_impl_base.h" #include "source/extensions/transport_sockets/tls/utility.h" #include "absl/container/node_hash_map.h" @@ -36,7 +36,7 @@ class SslExtendedSocketInfoImpl : public Envoy::Ssl::SslExtendedSocketInfo { Envoy::Ssl::ClientValidationStatus::NotValidated}; }; -class SslHandshakerImpl : public Ssl::ConnectionInfo, +class SslHandshakerImpl : public ConnectionInfoImplBase, public Ssl::Handshaker, protected Logger::Loggable { public: @@ -44,33 +44,16 @@ class SslHandshakerImpl : public Ssl::ConnectionInfo, Ssl::HandshakeCallbacks* handshake_callbacks); // Ssl::ConnectionInfo - bool peerCertificatePresented() const override; bool peerCertificateValidated() const override; - absl::Span uriSanLocalCertificate() const override; - const std::string& sha256PeerCertificateDigest() const override; - const std::string& sha1PeerCertificateDigest() const override; - const std::string& serialNumberPeerCertificate() const override; - const std::string& issuerPeerCertificate() const override; - const std::string& subjectPeerCertificate() const override; - const std::string& subjectLocalCertificate() const override; - absl::Span uriSanPeerCertificate() const override; - const std::string& urlEncodedPemEncodedPeerCertificate() const override; - const std::string& urlEncodedPemEncodedPeerCertificateChain() const override; - absl::Span dnsSansPeerCertificate() const override; - absl::Span dnsSansLocalCertificate() const override; - absl::optional validFromPeerCertificate() const override; - absl::optional expirationPeerCertificate() const override; - const std::string& sessionId() const override; - uint16_t ciphersuiteId() const override; - std::string ciphersuiteString() const override; - const 
std::string& tlsVersion() const override; + + // ConnectionInfoImplBase + SSL* ssl() const override { return ssl_.get(); } // Ssl::Handshaker Network::PostIoAction doHandshake() override; Ssl::SocketState state() const { return state_; } void setState(Ssl::SocketState state) { state_ = state; } - SSL* ssl() const { return ssl_.get(); } Ssl::HandshakeCallbacks* handshakeCallbacks() { return handshake_callbacks_; } bssl::UniquePtr ssl_; @@ -79,20 +62,6 @@ class SslHandshakerImpl : public Ssl::ConnectionInfo, Ssl::HandshakeCallbacks* handshake_callbacks_; Ssl::SocketState state_; - mutable std::vector cached_uri_san_local_certificate_; - mutable std::string cached_sha_256_peer_certificate_digest_; - mutable std::string cached_sha_1_peer_certificate_digest_; - mutable std::string cached_serial_number_peer_certificate_; - mutable std::string cached_issuer_peer_certificate_; - mutable std::string cached_subject_peer_certificate_; - mutable std::string cached_subject_local_certificate_; - mutable std::vector cached_uri_san_peer_certificate_; - mutable std::string cached_url_encoded_pem_encoded_peer_certificate_; - mutable std::string cached_url_encoded_pem_encoded_peer_cert_chain_; - mutable std::vector cached_dns_san_peer_certificate_; - mutable std::vector cached_dns_san_local_certificate_; - mutable std::string cached_session_id_; - mutable std::string cached_tls_version_; mutable SslExtendedSocketInfoImpl extended_socket_info_; }; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index c5056786ee9a2..31ed7fd52f012 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -164,7 +164,7 @@ Network::IoResult SslSocket::doRead(Buffer::Instance& read_buffer) { } void SslSocket::onPrivateKeyMethodComplete() { - ASSERT(isThreadSafe()); + ASSERT(callbacks_ != nullptr && callbacks_->connection().dispatcher().isThreadSafe()); 
ASSERT(info_->state() == Ssl::SocketState::HandshakeInProgress); // Resume handshake. diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 186bebbabc067..d7ce778387f6f 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -73,7 +73,7 @@ class SslSocket : public Network::TransportSocket, SSL* rawSslForTest() const { return rawSsl(); } protected: - SSL* rawSsl() const { return info_->ssl_.get(); } + SSL* rawSsl() const { return info_->ssl(); } private: struct ReadResult { @@ -86,9 +86,6 @@ class SslSocket : public Network::TransportSocket, void drainErrorQueue(); void shutdownSsl(); void shutdownBasic(); - bool isThreadSafe() const { - return callbacks_ != nullptr && callbacks_->connection().dispatcher().isThreadSafe(); - } const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; Network::TransportSocketCallbacks* callbacks_{}; diff --git a/source/extensions/transport_sockets/tls/utility.cc b/source/extensions/transport_sockets/tls/utility.cc index cbc6739c02996..3cf34a07e1f17 100644 --- a/source/extensions/transport_sockets/tls/utility.cc +++ b/source/extensions/transport_sockets/tls/utility.cc @@ -331,6 +331,15 @@ absl::string_view Utility::getErrorDescription(int err) { return SSL_ERROR_UNKNOWN_ERROR_MESSAGE; } +std::string Utility::getX509VerificationErrorInfo(X509_STORE_CTX* ctx) { + const int n = X509_STORE_CTX_get_error(ctx); + const int depth = X509_STORE_CTX_get_error_depth(ctx); + std::string error_details = + absl::StrCat("X509_verify_cert: certificate verification error at depth ", depth, ": ", + X509_verify_cert_error_string(n)); + return error_details; +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/utility.h b/source/extensions/transport_sockets/tls/utility.h index 76824328d46ba..1c638585649df 100644 
--- a/source/extensions/transport_sockets/tls/utility.h +++ b/source/extensions/transport_sockets/tls/utility.h @@ -101,6 +101,14 @@ absl::optional getLastCryptoError(); */ absl::string_view getErrorDescription(int err); +/** + * Extracts the X509 certificate validation error information. + * + * @param ctx the store context + * @return the error details + */ +std::string getX509VerificationErrorInfo(X509_STORE_CTX* ctx); + } // namespace Utility } // namespace Tls } // namespace TransportSockets diff --git a/source/extensions/upstreams/http/config.cc b/source/extensions/upstreams/http/config.cc index 29cde124642b0..52e78f1e7c3c5 100644 --- a/source/extensions/upstreams/http/config.cc +++ b/source/extensions/upstreams/http/config.cc @@ -111,7 +111,11 @@ ProtocolOptionsConfigImpl::ProtocolOptionsConfigImpl( use_http2_ = true; use_alpn_ = true; use_http3_ = options.auto_config().has_http3_protocol_options(); - if (options.auto_config().has_alternate_protocols_cache_options()) { + if (use_http3_) { + if (!options.auto_config().has_alternate_protocols_cache_options()) { + throw EnvoyException(fmt::format("alternate protocols cache must be configured when HTTP/3 " + "is enabled with auto_config")); + } alternate_protocol_cache_options_ = options.auto_config().alternate_protocols_cache_options(); } } diff --git a/source/extensions/upstreams/tcp/generic/config.cc b/source/extensions/upstreams/tcp/generic/config.cc index 77625e5fd0404..491f0569185d9 100644 --- a/source/extensions/upstreams/tcp/generic/config.cc +++ b/source/extensions/upstreams/tcp/generic/config.cc @@ -16,10 +16,15 @@ TcpProxy::GenericConnPoolPtr GenericConnPoolFactory::createGenericConnPool( const absl::optional& config, Upstream::LoadBalancerContext* context, Envoy::Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks) const { if (config.has_value()) { - auto pool_type = - ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2) != 0) - ? 
Http::CodecType::HTTP2 - : Http::CodecType::HTTP1; + Http::CodecType pool_type; + if ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2) != 0) { + pool_type = Http::CodecType::HTTP2; + } else if ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP3) != + 0) { + pool_type = Http::CodecType::HTTP3; + } else { + pool_type = Http::CodecType::HTTP1; + } auto ret = std::make_unique( thread_local_cluster, context, config.value(), upstream_callbacks, pool_type); return (ret->valid() ? std::move(ret) : nullptr); diff --git a/source/extensions/watchdog/profile_action/BUILD b/source/extensions/watchdog/profile_action/BUILD index 1de6bb89d0755..5f01f35e2ff24 100644 --- a/source/extensions/watchdog/profile_action/BUILD +++ b/source/extensions/watchdog/profile_action/BUILD @@ -25,7 +25,7 @@ envoy_cc_library( "//source/common/profiler:profiler_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_lib", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) @@ -40,6 +40,6 @@ envoy_cc_extension( "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:message_validator_lib", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/watchdog/profile_action/config.h b/source/extensions/watchdog/profile_action/config.h index de821d48fcedd..a6d208fed4dcf 100644 --- a/source/extensions/watchdog/profile_action/config.h +++ b/source/extensions/watchdog/profile_action/config.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/server/guarddog_config.h" #include 
"source/common/protobuf/protobuf.h" @@ -25,8 +25,7 @@ class ProfileActionFactory : public Server::Configuration::GuardDogActionFactory std::string name() const override { return "envoy.watchdog.profile_action"; } private: - using ProfileActionConfig = - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig; + using ProfileActionConfig = envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig; }; } // namespace ProfileAction diff --git a/source/extensions/watchdog/profile_action/profile_action.cc b/source/extensions/watchdog/profile_action/profile_action.cc index 3d1fc5adc4653..3f0e556a3cdeb 100644 --- a/source/extensions/watchdog/profile_action/profile_action.cc +++ b/source/extensions/watchdog/profile_action/profile_action.cc @@ -27,7 +27,7 @@ std::string generateProfileFilePath(const std::string& directory, TimeSource& ti } // namespace ProfileAction::ProfileAction( - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig& config, + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& context) : path_(config.profile_path()), duration_( diff --git a/source/extensions/watchdog/profile_action/profile_action.h b/source/extensions/watchdog/profile_action/profile_action.h index 144f6b9861ff3..5414ee32ee3ec 100644 --- a/source/extensions/watchdog/profile_action/profile_action.h +++ b/source/extensions/watchdog/profile_action/profile_action.h @@ -2,7 +2,7 @@ #include -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" @@ -18,7 +18,7 @@ namespace ProfileAction { */ class ProfileAction : public Server::Configuration::GuardDogAction { public: - ProfileAction(envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig& config, + 
ProfileAction(envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& context); void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, diff --git a/source/server/BUILD b/source/server/BUILD index b29bb1a84e333..3f4ed5f3599be 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -272,7 +272,7 @@ envoy_cc_library( "//source/common/stats:symbol_table_lib", "//source/common/watchdog:abort_action_config", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) @@ -538,6 +538,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "factory_context_base_impl_lib", + hdrs = ["factory_context_base_impl.h"], + deps = [ + "//envoy/server:factory_context_interface", + ], +) + envoy_cc_library( name = "server_lib", srcs = ["server.cc"], diff --git a/source/server/active_stream_listener_base.h b/source/server/active_stream_listener_base.h index 89ebd2877c659..38e50a1a4af58 100644 --- a/source/server/active_stream_listener_base.h +++ b/source/server/active_stream_listener_base.h @@ -38,8 +38,8 @@ class ActiveStreamListenerBase : public ActiveListenerImplBase, * Schedule to remove and destroy the active connections which are not tracked by listener * config. Caution: The connection are not destroyed yet when function returns. */ - void - deferredRemoveFilterChains(const std::list& draining_filter_chains) { + void onFilterChainDraining( + const std::list& draining_filter_chains) override { // Need to recover the original deleting state. 
const bool was_deleting = is_deleting_; is_deleting_ = true; diff --git a/source/server/active_tcp_listener.h b/source/server/active_tcp_listener.h index 00d93e744a8d7..0d0299a44914f 100644 --- a/source/server/active_tcp_listener.h +++ b/source/server/active_tcp_listener.h @@ -74,7 +74,7 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, * Update the listener config. The follow up connections will see the new config. The existing * connections are not impacted. */ - void updateListenerConfig(Network::ListenerConfig& config); + void updateListenerConfig(Network::ListenerConfig& config) override; Network::TcpConnectionHandler& tcp_conn_handler_; // The number of connections currently active on this listener. This is typically used for diff --git a/source/server/active_udp_listener.h b/source/server/active_udp_listener.h index eef7ca228e738..68918ffc39301 100644 --- a/source/server/active_udp_listener.h +++ b/source/server/active_udp_listener.h @@ -103,6 +103,12 @@ class ActiveRawUdpListener : public ActiveUdpListenerBase, read_filter_.reset(); udp_listener_.reset(); } + // These two are unreachable because a config will be rejected if it configures both this listener + // and any L4 filter chain. 
+ void updateListenerConfig(Network::ListenerConfig&) override { NOT_REACHED_GCOVR_EXCL_LINE; } + void onFilterChainDraining(const std::list&) override { + NOT_REACHED_GCOVR_EXCL_LINE; + } // Network::UdpListenerFilterManager void addReadFilter(Network::UdpListenerReadFilterPtr&& filter) override; diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 19463acd7a35b..365a5b8109a9a 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -117,6 +117,7 @@ envoy_cc_library( hdrs = ["prometheus_stats.h"], deps = [ ":utils_lib", + "//envoy/stats:custom_stat_namespaces_interface", "//source/common/buffer:buffer_lib", "//source/common/stats:histogram_lib", ], diff --git a/source/server/admin/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc index b0b01dc9759ac..a3d0c7657335a 100644 --- a/source/server/admin/prometheus_stats.cc +++ b/source/server/admin/prometheus_stats.cc @@ -2,6 +2,7 @@ #include "source/common/common/empty_string.h" #include "source/common/common/macros.h" +#include "source/common/common/regex.h" #include "source/common/stats/histogram_impl.h" #include "absl/strings/str_cat.h" @@ -11,19 +12,18 @@ namespace Server { namespace { -const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } -const std::regex& namespaceRegex() { - CONSTRUCT_ON_FIRST_USE(std::regex, "^[a-zA-Z_][a-zA-Z0-9]*$"); +const Regex::CompiledGoogleReMatcher& promRegex() { + CONSTRUCT_ON_FIRST_USE(Regex::CompiledGoogleReMatcher, "[^a-zA-Z0-9_]", false); } /** * Take a string and sanitize it according to Prometheus conventions. */ -std::string sanitizeName(const std::string& name) { +std::string sanitizeName(const absl::string_view name) { // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. // The initial [a-zA-Z_] constraint is always satisfied by the namespace prefix. 
- return std::regex_replace(name, promRegex(), "_"); + return promRegex().replaceAll(name, "_"); } /* @@ -67,7 +67,7 @@ uint64_t outputStatType( const std::vector>& metrics, const std::function& generate_output, - absl::string_view type) { + absl::string_view type, const Stats::CustomStatNamespaces& custom_namespaces) { /* * From @@ -112,10 +112,16 @@ uint64_t outputStatType( groups[metric->tagExtractedStatName()].push_back(metric.get()); } + auto result = groups.size(); for (auto& group : groups) { - const std::string prefixed_tag_extracted_name = - PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first)); - response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name, type)); + const absl::optional prefixed_tag_extracted_name = + PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first), + custom_namespaces); + if (!prefixed_tag_extracted_name.has_value()) { + --result; + continue; + } + response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name.value(), type)); // Sort before producing the final output to satisfy the "preferred" ordering from the // prometheus spec: metrics will be sorted by their tags' textual representation, which will @@ -123,11 +129,11 @@ uint64_t outputStatType( std::sort(group.second.begin(), group.second.end(), MetricLessThan()); for (const auto& metric : group.second) { - response.add(generate_output(*metric, prefixed_tag_extracted_name)); + response.add(generate_output(*metric, prefixed_tag_extracted_name.value())); } response.add("\n"); } - return groups.size(); + return result; } /* @@ -176,10 +182,6 @@ std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, return output; }; -absl::flat_hash_set& prometheusNamespaces() { - MUTABLE_CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set); -} - } // namespace std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { @@ -191,20 +193,30 @@ std::string 
PrometheusStatsFormatter::formattedTags(const std::vector +PrometheusStatsFormatter::metricName(const std::string& extracted_name, + const Stats::CustomStatNamespaces& custom_namespaces) { + const absl::optional custom_namespace_stripped = + custom_namespaces.stripRegisteredPrefix(extracted_name); + if (custom_namespace_stripped.has_value()) { + // This case the name has a custom namespace, and it is a custom metric. + const std::string sanitized_name = sanitizeName(custom_namespace_stripped.value()); + // We expose these metrics without modifying (e.g. without "envoy_"), + // so we have to check the "user-defined" stat name complies with the Prometheus naming + // convention. Specifically the name must start with the "[a-zA-Z_]" pattern. + // All the characters in sanitized_name are already in "[a-zA-Z0-9_]" pattern + // thanks to sanitizeName above, so the only thing we have to do is check + // if it does not start with digits. + if (sanitized_name.empty() || absl::ascii_isdigit(sanitized_name.front())) { + return absl::nullopt; + } return sanitized_name; } - // Add namespacing prefix to avoid conflicts, as per best practice: - // https://prometheus.io/docs/practices/naming/#metric-names - // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ - return absl::StrCat("envoy_", sanitized_name); + // If it does not have a custom namespace, add namespacing prefix to avoid conflicts, as per best + // practice: https://prometheus.io/docs/practices/naming/#metric-names Also, naming conventions on + // https://prometheus.io/docs/concepts/data_model/ + return absl::StrCat("envoy_", sanitizeName(extracted_name)); } // TODO(efimki): Add support of text readouts stats. 
@@ -212,38 +224,24 @@ uint64_t PrometheusStatsFormatter::statsAsPrometheus( const std::vector& counters, const std::vector& gauges, const std::vector& histograms, Buffer::Instance& response, - const bool used_only, const absl::optional& regex) { + const bool used_only, const absl::optional& regex, + const Stats::CustomStatNamespaces& custom_namespaces) { uint64_t metric_name_count = 0; - metric_name_count += outputStatType( - response, used_only, regex, counters, generateNumericOutput, "counter"); + metric_name_count += outputStatType(response, used_only, regex, counters, + generateNumericOutput, + "counter", custom_namespaces); - metric_name_count += outputStatType(response, used_only, regex, gauges, - generateNumericOutput, "gauge"); + metric_name_count += + outputStatType(response, used_only, regex, gauges, + generateNumericOutput, "gauge", custom_namespaces); - metric_name_count += outputStatType( - response, used_only, regex, histograms, generateHistogramOutput, "histogram"); + metric_name_count += outputStatType(response, used_only, regex, + histograms, generateHistogramOutput, + "histogram", custom_namespaces); return metric_name_count; } -bool PrometheusStatsFormatter::registerPrometheusNamespace(absl::string_view prometheus_namespace) { - if (std::regex_match(prometheus_namespace.begin(), prometheus_namespace.end(), - namespaceRegex())) { - return prometheusNamespaces().insert(std::string(prometheus_namespace)).second; - } - return false; -} - -bool PrometheusStatsFormatter::unregisterPrometheusNamespace( - absl::string_view prometheus_namespace) { - auto it = prometheusNamespaces().find(prometheus_namespace); - if (it == prometheusNamespaces().end()) { - return false; - } - prometheusNamespaces().erase(it); - return true; -} - } // namespace Server } // namespace Envoy diff --git a/source/server/admin/prometheus_stats.h b/source/server/admin/prometheus_stats.h index 6e45db166db5e..ea1d53a0ccb4f 100644 --- a/source/server/admin/prometheus_stats.h +++ 
b/source/server/admin/prometheus_stats.h @@ -4,6 +4,7 @@ #include #include "envoy/buffer/buffer.h" +#include "envoy/stats/custom_stat_namespaces.h" #include "envoy/stats/histogram.h" #include "envoy/stats/stats.h" @@ -25,7 +26,8 @@ class PrometheusStatsFormatter { const std::vector& gauges, const std::vector& histograms, Buffer::Instance& response, const bool used_only, - const absl::optional& regex); + const absl::optional& regex, + const Stats::CustomStatNamespaces& custom_namespaces); /** * Format the given tags, returning a string as a comma-separated list * of ="" pairs. @@ -33,26 +35,14 @@ class PrometheusStatsFormatter { static std::string formattedTags(const std::vector& tags); /** - * Format the given metric name, prefixed with "envoy_". + * Format the given metric name, and prefixed with "envoy_" if it does not have a custom + * stat namespace. If it has a custom stat namespace AND the name without the custom namespace + * has a valid prometheus namespace, the trimmed name is returned. + * Otherwise, return nullopt. */ - static std::string metricName(const std::string& extracted_name); - - /** - * Register a prometheus namespace, stats starting with the namespace will not be - * automatically prefixed with envoy namespace. - * This method must be called from the main thread. - * @returns bool if a new namespace is registered, false if the namespace is already - * registered or the namespace is invalid. - */ - static bool registerPrometheusNamespace(absl::string_view prometheus_namespace); - - /** - * Unregister a prometheus namespace registered by `registerPrometheusNamespace` - * This method must be called from the main thread. - * @returns bool if the Prometheus namespace is unregistered. false if the namespace - * wasn't registered. 
- */ - static bool unregisterPrometheusNamespace(absl::string_view prometheus_namespace); + static absl::optional + metricName(const std::string& extracted_name, + const Stats::CustomStatNamespaces& custom_namespace_factory); }; } // namespace Server diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index d8426f10abc57..929e9474587dd 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -82,6 +82,11 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, return Http::Code::BadRequest; } + const absl::optional format_value = Utility::formatParam(params); + if (format_value.has_value() && format_value.value() == "prometheus") { + return handlerPrometheusStats(url, response_headers, response, admin_stream); + } + std::map all_stats; for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) { if (shouldShowMetric(*counter, used_only, regex)) { @@ -103,7 +108,6 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } } - absl::optional format_value = Utility::formatParam(params); if (!format_value.has_value()) { // Display plain stats if format query param is not there. 
statsAsText(all_stats, text_readouts, server_.stats().histograms(), used_only, regex, response); @@ -117,10 +121,6 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, return Http::Code::OK; } - if (format_value.value() == "prometheus") { - return handlerPrometheusStats(url, response_headers, response, admin_stream); - } - response.add("usage: /stats?format=json or /stats?format=prometheus \n"); response.add("\n"); return Http::Code::NotFound; @@ -138,7 +138,7 @@ Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query } PrometheusStatsFormatter::statsAsPrometheus(server_.stats().counters(), server_.stats().gauges(), server_.stats().histograms(), response, used_only, - regex); + regex, server_.api().customStatNamespaces()); return Http::Code::OK; } diff --git a/source/server/api_listener_impl.h b/source/server/api_listener_impl.h index 6d2b41bcbd3fc..088ec965f21fc 100644 --- a/source/server/api_listener_impl.h +++ b/source/server/api_listener_impl.h @@ -112,7 +112,7 @@ class ApiListenerImplBase : public ApiListener, bool isHalfCloseEnabled() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } void close(Network::ConnectionCloseType) override {} Event::Dispatcher& dispatcher() override { - return parent_.parent_.factory_context_.dispatcher(); + return parent_.parent_.factory_context_.mainThreadDispatcher(); } uint64_t id() const override { return 12345; } void hashKey(std::vector&) const override {} diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index 16ab99e868fc0..742fb4bb3b5ed 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -11,8 +11,9 @@ namespace Upstream { ClusterManagerPtr ValidationClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { return std::make_unique( - bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, 
main_thread_dispatcher_, - admin_, validation_context_, api_, http_context_, grpc_context_, router_context_); + bootstrap, *this, stats_, tls_, context_.runtime(), local_info_, log_manager_, + context_.mainThreadDispatcher(), admin_, validation_context_, context_.api(), http_context_, + grpc_context_, router_context_); } CdsApiPtr ValidationClusterManagerFactory::createCds( diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 6a0039c7f189e..57eba26cabcbb 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -83,7 +83,9 @@ void ValidationInstance::initialize(const Options& options, messageValidationContext().staticValidationVisitor(), *api_); Config::Utility::createTagProducer(bootstrap_); - bootstrap_.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version()); + if (!bootstrap_.node().user_agent_build_version().has_version()) { + *bootstrap_.mutable_node()->mutable_user_agent_build_version() = VersionInfo::buildVersion(); + } local_info_ = std::make_unique( stats().symbolTable(), bootstrap_.node(), bootstrap_.node_context_params(), local_address, diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 4f64afc227b06..4bcdcd9cc16aa 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -27,9 +27,19 @@ void ConnectionHandlerImpl::decNumConnections() { void ConnectionHandlerImpl::addListener(absl::optional overridden_listener, Network::ListenerConfig& config) { + const bool support_udp_in_place_filter_chain_update = Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place"); + if (support_udp_in_place_filter_chain_update && overridden_listener.has_value()) { + ActiveListenerDetailsOptRef listener_detail = + findActiveListenerByTag(overridden_listener.value()); + ASSERT(listener_detail.has_value()); + 
listener_detail->get().listener_->updateListenerConfig(config); + return; + } + ActiveListenerDetails details; if (config.listenSocketFactory().socketType() == Network::Socket::Type::Stream) { - if (overridden_listener.has_value()) { + if (!support_udp_in_place_filter_chain_update && overridden_listener.has_value()) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == overridden_listener) { listener.second.tcpListener()->get().updateListenerConfig(config); @@ -89,7 +99,7 @@ void ConnectionHandlerImpl::removeFilterChains( std::function completion) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == listener_tag) { - listener.second.tcpListener()->get().deferredRemoveFilterChains(filter_chains); + listener.second.listener_->onFilterChainDraining(filter_chains); break; } } diff --git a/source/server/factory_context_base_impl.h b/source/server/factory_context_base_impl.h new file mode 100644 index 0000000000000..0579f86a4eb51 --- /dev/null +++ b/source/server/factory_context_base_impl.h @@ -0,0 +1,57 @@ +#pragma once + +#include "envoy/server/factory_context.h" + +namespace Envoy { +namespace Server { + +class FactoryContextBaseImpl : public Configuration::FactoryContextBase { +public: + FactoryContextBaseImpl(const Server::Options& options, Event::Dispatcher& main_thread_dispatcher, + Api::Api& api, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Runtime::Loader& runtime, + Singleton::Manager& singleton_manager, + ProtobufMessage::ValidationVisitor& validation_visitor, + Stats::Store& scope, ThreadLocal::Instance& local) + : options_(options), main_thread_dispatcher_(main_thread_dispatcher), api_(api), + local_info_(local_info), admin_(admin), runtime_(runtime), + singleton_manager_(singleton_manager), validation_visitor_(validation_visitor), + scope_(scope), thread_local_(local) {} + + FactoryContextBaseImpl(Configuration::FactoryContextBase& config) + : options_(config.options()), 
main_thread_dispatcher_(config.mainThreadDispatcher()), + api_(config.api()), local_info_(config.localInfo()), admin_(config.admin()), + runtime_(config.runtime()), singleton_manager_(config.singletonManager()), + validation_visitor_(config.messageValidationVisitor()), scope_(config.scope()), + thread_local_(config.threadLocal()) {} + + // FactoryContextBase + const Options& options() override { return options_; }; + Event::Dispatcher& mainThreadDispatcher() override { return main_thread_dispatcher_; }; + Api::Api& api() override { return api_; }; + const LocalInfo::LocalInfo& localInfo() const override { return local_info_; }; + Server::Admin& admin() override { return admin_; }; + Envoy::Runtime::Loader& runtime() override { return runtime_; }; + Singleton::Manager& singletonManager() override { return singleton_manager_; }; + ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { + return validation_visitor_; + }; + Stats::Scope& scope() override { return scope_; }; + Stats::Scope& serverScope() override { return scope_; } + ThreadLocal::SlotAllocator& threadLocal() override { return thread_local_; }; + +private: + const Server::Options& options_; + Event::Dispatcher& main_thread_dispatcher_; + Api::Api& api_; + const LocalInfo::LocalInfo& local_info_; + Server::Admin& admin_; + Runtime::Loader& runtime_; + Singleton::Manager& singleton_manager_; + ProtobufMessage::ValidationVisitor& validation_visitor_; + Stats::Scope& scope_; + ThreadLocal::SlotAllocator& thread_local_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index f1a74a70e78cd..f99ed1f1858c0 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -69,8 +69,8 @@ Upstream::ClusterManager& PerFilterChainFactoryContextImpl::clusterManager() { return parent_context_.clusterManager(); } -Event::Dispatcher& 
PerFilterChainFactoryContextImpl::dispatcher() { - return parent_context_.dispatcher(); +Event::Dispatcher& PerFilterChainFactoryContextImpl::mainThreadDispatcher() { + return parent_context_.mainThreadDispatcher(); } const Server::Options& PerFilterChainFactoryContextImpl::options() { @@ -755,7 +755,7 @@ AccessLog::AccessLogManager& FactoryContextImpl::accessLogManager() { return server_.accessLogManager(); } Upstream::ClusterManager& FactoryContextImpl::clusterManager() { return server_.clusterManager(); } -Event::Dispatcher& FactoryContextImpl::dispatcher() { return server_.dispatcher(); } +Event::Dispatcher& FactoryContextImpl::mainThreadDispatcher() { return server_.dispatcher(); } const Server::Options& FactoryContextImpl::options() { return server_.options(); } Grpc::Context& FactoryContextImpl::grpcContext() { return server_.grpcContext(); } Router::Context& FactoryContextImpl::routerContext() { return server_.routerContext(); } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 8218e89777127..6bc170c93e01d 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -55,7 +55,7 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor // Configuration::FactoryContext AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Server::Options& options() override; Network::DrainDecision& drainDecision() override; Grpc::Context& grpcContext() override; @@ -66,6 +66,7 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; + Stats::Scope& serverScope() override { return parent_context_.serverScope(); } Singleton::Manager& 
singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::SlotAllocator& threadLocal() override; @@ -141,7 +142,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { // Configuration::FactoryContext AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Server::Options& options() override; Grpc::Context& grpcContext() override; Router::Context& routerContext() override; @@ -151,6 +152,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; + Stats::Scope& serverScope() override { return server_.stats(); } Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::SlotAllocator& threadLocal() override; diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index 9aafc65b714f8..cfc8256d54200 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -13,7 +13,7 @@ #include "envoy/server/guarddog.h" #include "envoy/server/guarddog_config.h" #include "envoy/stats/scope.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/common/assert.h" #include "source/common/common/fmt.h" @@ -69,14 +69,14 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio // Add default abort_action if kill and/or multi-kill is enabled. 
if (config.killTimeout().count() > 0) { - envoy::watchdog::v3alpha::AbortActionConfig abort_config; + envoy::watchdog::v3::AbortActionConfig abort_config; WatchDogAction* abort_action_config = actions.Add(); abort_action_config->set_event(WatchDogAction::KILL); abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); } if (config.multiKillTimeout().count() > 0) { - envoy::watchdog::v3alpha::AbortActionConfig abort_config; + envoy::watchdog::v3::AbortActionConfig abort_config; WatchDogAction* abort_action_config = actions.Add(); abort_action_config->set_event(WatchDogAction::MULTIKILL); abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index c6a666477ce35..943ba20d4c18b 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -223,7 +223,9 @@ AccessLog::AccessLogManager& ListenerFactoryContextBaseImpl::accessLogManager() Upstream::ClusterManager& ListenerFactoryContextBaseImpl::clusterManager() { return server_.clusterManager(); } -Event::Dispatcher& ListenerFactoryContextBaseImpl::dispatcher() { return server_.dispatcher(); } +Event::Dispatcher& ListenerFactoryContextBaseImpl::mainThreadDispatcher() { + return server_.dispatcher(); +} const Server::Options& ListenerFactoryContextBaseImpl::options() { return server_.options(); } Grpc::Context& ListenerFactoryContextBaseImpl::grpcContext() { return server_.grpcContext(); } bool ListenerFactoryContextBaseImpl::healthCheckFailed() { return server_.healthCheckFailed(); } @@ -371,7 +373,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, bool workers_started, - uint64_t hash, uint32_t concurrency) + uint64_t hash) : parent_(parent), address_(origin.address_), 
bind_to_port_(shouldBindToPort(config)), hand_off_restored_destination_connections_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, use_original_dst, false)), @@ -392,6 +394,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, listener_filters_timeout_( PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + udp_listener_config_(origin.udp_listener_config_), connection_balancer_(origin.connection_balancer_), listener_factory_context_(std::make_shared( origin.listener_factory_context_->listener_factory_context_base_, this, *this)), @@ -407,18 +410,18 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, quic_stat_names_(parent_.quicStatNames()) { buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); - // buildUdpListenerFactory() must come before buildListenSocketOptions() because the UDP - // listener factory can provide additional options. - buildUdpListenerFactory(socket_type, concurrency); buildListenSocketOptions(socket_type); createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); - // In place update is tcp only so it's safe to apply below tcp only initialization. - buildSocketOptions(); - buildOriginalDstListenerFilter(); - buildProxyProtocolListenerFilter(); - open_connections_ = origin.open_connections_; + + if (socket_type == Network::Socket::Type::Stream) { + // Apply the options below only for TCP. 
+ buildSocketOptions(); + buildOriginalDstListenerFilter(); + buildProxyProtocolListenerFilter(); + open_connections_ = origin.open_connections_; + } } void ListenerImpl::buildAccessLog() { @@ -441,7 +444,7 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, "set concurrency = 1."); } - udp_listener_config_ = std::make_unique(config_.udp_listener_config()); + udp_listener_config_ = std::make_shared(config_.udp_listener_config()); if (config_.udp_listener_config().has_quic_options()) { #ifdef ENVOY_ENABLE_QUIC if (config_.has_connection_balance_config()) { @@ -552,6 +555,15 @@ void ListenerImpl::validateFilterChains(Network::Socket::Type socket_type) { "specified for connection oriented UDP listener", address_->asString())); } + } else if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place") && + (!config_.filter_chains().empty() || config_.has_default_filter_chain()) && + udp_listener_config_ != nullptr && + udp_listener_config_->listener_factory_->isTransportConnectionless()) { + + throw EnvoyException(fmt::format("error adding listener '{}': {} filter chain(s) specified for " + "connection-less UDP listener.", + address_->asString(), config_.filter_chains_size())); } } @@ -631,8 +643,8 @@ AccessLog::AccessLogManager& PerListenerFactoryContextImpl::accessLogManager() { Upstream::ClusterManager& PerListenerFactoryContextImpl::clusterManager() { return listener_factory_context_base_->clusterManager(); } -Event::Dispatcher& PerListenerFactoryContextImpl::dispatcher() { - return listener_factory_context_base_->dispatcher(); +Event::Dispatcher& PerListenerFactoryContextImpl::mainThreadDispatcher() { + return listener_factory_context_base_->mainThreadDispatcher(); } const Server::Options& PerListenerFactoryContextImpl::options() { return listener_factory_context_base_->options(); @@ -767,11 +779,12 @@ bool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::L return 
false; } - // Currently we only support TCP filter chain update. - if (Network::Utility::protobufAddressSocketType(config_.address()) != - Network::Socket::Type::Stream || - Network::Utility::protobufAddressSocketType(config.address()) != - Network::Socket::Type::Stream) { + if (!Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place") && + (Network::Utility::protobufAddressSocketType(config_.address()) != + Network::Socket::Type::Stream || + Network::Utility::protobufAddressSocketType(config.address()) != + Network::Socket::Type::Stream)) { return false; } @@ -793,10 +806,10 @@ ListenerImplPtr ListenerImpl::newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config, bool workers_started, uint64_t hash) { // Use WrapUnique since the constructor is private. - return absl::WrapUnique( - new ListenerImpl(*this, config, version_info_, parent_, name_, added_via_api_, - /* new new workers started state */ workers_started, - /* use new hash */ hash, parent_.server_.options().concurrency())); + return absl::WrapUnique(new ListenerImpl(*this, config, version_info_, parent_, name_, + added_via_api_, + /* new new workers started state */ workers_started, + /* use new hash */ hash)); } void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 0114ff9c9e33e..aac102089f780 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -104,7 +104,7 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Server::DrainManagerPtr drain_manager); AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Server::Options& options() override; Network::DrainDecision& drainDecision() override; Grpc::Context& grpcContext() 
override; @@ -114,6 +114,7 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; + Stats::Scope& serverScope() override { return server_.stats(); } Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; @@ -177,7 +178,7 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte // FactoryContext AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Options& options() override; Network::DrainDecision& drainDecision() override; Grpc::Context& grpcContext() override; @@ -188,6 +189,7 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; + Stats::Scope& serverScope() override { return listener_factory_context_base_->serverScope(); } Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::Instance& threadLocal() override; @@ -365,8 +367,7 @@ class ListenerImpl final : public Network::ListenerConfig, */ ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, - const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, - uint32_t concurrency); + const std::string& name, bool added_via_api, bool workers_started, uint64_t hash); // Helpers for constructor. 
void buildAccessLog(); void buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency); @@ -413,7 +414,7 @@ class ListenerImpl final : public Network::ListenerConfig, Network::Socket::OptionsSharedPtr listen_socket_options_; const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; - std::unique_ptr udp_listener_config_; + std::shared_ptr udp_listener_config_; Network::ConnectionBalancerSharedPtr connection_balancer_; std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; diff --git a/source/server/proto_descriptors.cc b/source/server/proto_descriptors.cc index 9638b84ba4f3b..e94492c531cc3 100644 --- a/source/server/proto_descriptors.cc +++ b/source/server/proto_descriptors.cc @@ -20,6 +20,7 @@ void validateProtoDescriptors() { "envoy.service.endpoint.v3.EndpointDiscoveryService.FetchEndpoints", "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints", "envoy.service.endpoint.v3.EndpointDiscoveryService.DeltaEndpoints", + "envoy.service.endpoint.v3.LocalityEndpointDiscoveryService.DeltaLocalityEndpoints", "envoy.service.health.v3.HealthDiscoveryService.FetchHealthCheck", "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck", "envoy.service.listener.v3.ListenerDiscoveryService.FetchListeners", @@ -39,9 +40,10 @@ void validateProtoDescriptors() { } const auto types = { - "envoy.config.cluster.v3.Cluster", "envoy.config.endpoint.v3.ClusterLoadAssignment", - "envoy.config.listener.v3.Listener", "envoy.config.route.v3.RouteConfiguration", - "envoy.config.route.v3.VirtualHost", "envoy.extensions.transport_sockets.tls.v3.Secret", + "envoy.config.cluster.v3.Cluster", "envoy.config.endpoint.v3.ClusterLoadAssignment", + "envoy.config.listener.v3.Listener", "envoy.config.route.v3.RouteConfiguration", + "envoy.config.route.v3.VirtualHost", "envoy.extensions.transport_sockets.tls.v3.Secret", + "envoy.config.endpoint.v3.LbEndpoint", }; for (const 
auto& type : types) { diff --git a/source/server/resource_monitor_config_impl.h b/source/server/resource_monitor_config_impl.h index 03ef1c0170941..5bfc8d521e69f 100644 --- a/source/server/resource_monitor_config_impl.h +++ b/source/server/resource_monitor_config_impl.h @@ -14,7 +14,7 @@ class ResourceMonitorFactoryContextImpl : public ResourceMonitorFactoryContext { : dispatcher_(dispatcher), options_(options), api_(api), validation_visitor_(validation_visitor) {} - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } const Server::Options& options() override { return options_; } diff --git a/source/server/server.cc b/source/server/server.cc index 152058301be54..f35aa52fc58fb 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -106,7 +106,7 @@ InstanceImpl::InstanceImpl( restarter_.initialize(*dispatcher_, *this); drain_manager_ = component_factory.createDrainManager(*this); - initialize(options, std::move(local_address), component_factory); + initialize(std::move(local_address), component_factory); } END_TRY catch (const EnvoyException& e) { @@ -166,16 +166,26 @@ void InstanceImpl::failHealthcheck(bool fail) { } MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store, TimeSource& time_source) { - store.forEachCounter([this](std::size_t size) mutable { counters_.reserve(size); }, - [this](Stats::Counter& counter) mutable { - counters_.push_back({counter.latch(), counter}); - }); - - store.forEachGauge([this](std::size_t size) mutable { gauges_.reserve(size); }, - [this](Stats::Gauge& gauge) mutable { - ASSERT(gauge.importMode() != Stats::Gauge::ImportMode::Uninitialized); - gauges_.push_back(gauge); - }); + store.forEachCounter( + [this](std::size_t size) mutable { + snapped_counters_.reserve(size); + counters_.reserve(size); + }, + [this](Stats::Counter& counter) mutable { + snapped_counters_.push_back(Stats::CounterSharedPtr(&counter)); + 
counters_.push_back({counter.latch(), counter}); + }); + + store.forEachGauge( + [this](std::size_t size) mutable { + snapped_gauges_.reserve(size); + gauges_.reserve(size); + }, + [this](Stats::Gauge& gauge) mutable { + ASSERT(gauge.importMode() != Stats::Gauge::ImportMode::Uninitialized); + snapped_gauges_.push_back(Stats::GaugeSharedPtr(&gauge)); + gauges_.push_back(gauge); + }); snapped_histograms_ = store.histograms(); histograms_.reserve(snapped_histograms_.size()); @@ -184,8 +194,14 @@ MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store, TimeSource& time_sou } store.forEachTextReadout( - [this](std::size_t size) mutable { text_readouts_.reserve(size); }, - [this](Stats::TextReadout& text_readout) { text_readouts_.push_back(text_readout); }); + [this](std::size_t size) mutable { + snapped_text_readouts_.reserve(size); + text_readouts_.reserve(size); + }, + [this](Stats::TextReadout& text_readout) { + snapped_text_readouts_.push_back(Stats::TextReadoutSharedPtr(&text_readout)); + text_readouts_.push_back(text_readout); + }); snapshot_time_ = time_source.systemTime(); } @@ -346,11 +362,10 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& MessageUtil::validate(bootstrap, validation_visitor); } -void InstanceImpl::initialize(const Options& options, - Network::Address::InstanceConstSharedPtr local_address, +void InstanceImpl::initialize(Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory) { ENVOY_LOG(info, "initializing epoch {} (base id={}, hot restart version={})", - options.restartEpoch(), restarter_.baseId(), restarter_.version()); + options_.restartEpoch(), restarter_.baseId(), restarter_.version()); ENVOY_LOG(info, "statically linked extensions:"); for (const auto& ext : Envoy::Registry::FactoryCategoryRegistry::registeredFactories()) { @@ -358,7 +373,7 @@ void InstanceImpl::initialize(const Options& options, } // Handle configuration that needs to take place prior to the main 
configuration load. - InstanceUtil::loadBootstrapConfig(bootstrap_, options, + InstanceUtil::loadBootstrapConfig(bootstrap_, options_, messageValidationContext().staticValidationVisitor(), *api_); bootstrap_config_update_time_ = time_source_.systemTime(); @@ -397,10 +412,9 @@ void InstanceImpl::initialize(const Options& options, POOL_COUNTER_PREFIX(stats_store_, server_compilation_settings_stats_prefix), POOL_GAUGE_PREFIX(stats_store_, server_compilation_settings_stats_prefix), POOL_HISTOGRAM_PREFIX(stats_store_, server_compilation_settings_stats_prefix))}); - validation_context_.staticWarningValidationVisitor().setUnknownCounter( - server_stats_->static_unknown_fields_); - validation_context_.dynamicWarningValidationVisitor().setUnknownCounter( - server_stats_->dynamic_unknown_fields_); + validation_context_.setCounters(server_stats_->static_unknown_fields_, + server_stats_->dynamic_unknown_fields_, + server_stats_->wip_protos_); initialization_timer_ = std::make_unique( server_stats_->initialization_time_ms_, timeSource()); @@ -456,7 +470,7 @@ void InstanceImpl::initialize(const Options& options, local_info_ = std::make_unique( stats().symbolTable(), bootstrap_.node(), bootstrap_.node_context_params(), local_address, - options.serviceZone(), options.serviceClusterName(), options.serviceNodeName()); + options_.serviceZone(), options_.serviceClusterName(), options_.serviceNodeName()); Configuration::InitialImpl initial_config(bootstrap_); @@ -480,7 +494,7 @@ void InstanceImpl::initialize(const Options& options, // Initialize the overload manager early so other modules can register for actions. 
overload_manager_ = std::make_unique( *dispatcher_, stats_store_, thread_local_, bootstrap_.overload_manager(), - messageValidationContext().staticValidationVisitor(), *api_, options); + messageValidationContext().staticValidationVisitor(), *api_, options_); heap_shrinker_ = std::make_unique(*dispatcher_, *overload_manager_, stats_store_); @@ -563,7 +577,7 @@ void InstanceImpl::initialize(const Options& options, initial_config.initAdminAccessLog(bootstrap_, *this); if (initial_config.admin().address()) { - admin_->startHttpListener(initial_config.admin().accessLogs(), options.adminAddressPath(), + admin_->startHttpListener(initial_config.admin().accessLogs(), options_.adminAddressPath(), initial_config.admin().address(), initial_config.admin().socketOptions(), stats_store_.createScope("listener.admin.")); @@ -914,7 +928,7 @@ InstanceImpl::registerCallback(Stage stage, StageCallbackWithCompletion callback } void InstanceImpl::notifyCallbacksForStage(Stage stage, Event::PostCb completion_cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); const auto it = stage_callbacks_.find(stage); if (it != stage_callbacks_.end()) { for (const StageCallback& callback : it->second) { diff --git a/source/server/server.h b/source/server/server.h index 9234f6c2ae7b7..19ac1b5ce169a 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -71,6 +71,7 @@ struct ServerCompilationSettingsStats { COUNTER(envoy_bug_failures) \ COUNTER(dynamic_unknown_fields) \ COUNTER(static_unknown_fields) \ + COUNTER(wip_protos) \ COUNTER(dropped_stat_flushes) \ GAUGE(concurrency, NeverImport) \ GAUGE(days_until_first_cert_expiring, NeverImport) \ @@ -174,7 +175,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, // Configuration::ServerFactoryContext Upstream::ClusterManager& clusterManager() override { return server_.clusterManager(); } - Event::Dispatcher& dispatcher() override { return server_.dispatcher(); } + 
Event::Dispatcher& mainThreadDispatcher() override { return server_.dispatcher(); } const Server::Options& options() override { return server_.options(); } const LocalInfo::LocalInfo& localInfo() const override { return server_.localInfo(); } ProtobufMessage::ValidationContext& messageValidationContext() override { @@ -182,6 +183,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, } Envoy::Runtime::Loader& runtime() override { return server_.runtime(); } Stats::Scope& scope() override { return *server_scope_; } + Stats::Scope& serverScope() override { return *server_scope_; } Singleton::Manager& singletonManager() override { return server_.singletonManager(); } ThreadLocal::Instance& threadLocal() override { return server_.threadLocal(); } Admin& admin() override { return server_.admin(); } @@ -302,7 +304,7 @@ class InstanceImpl final : Logger::Loggable, ProtobufTypes::MessagePtr dumpBootstrapConfig(); void flushStatsInternal(); void updateServerStats(); - void initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, + void initialize(Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory); void loadServerFlags(const absl::optional& flags_path); void startWorkers(); diff --git a/source/server/transport_socket_config_impl.h b/source/server/transport_socket_config_impl.h index 600c2e9386826..7a94bd110cecf 100644 --- a/source/server/transport_socket_config_impl.h +++ b/source/server/transport_socket_config_impl.h @@ -40,7 +40,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { } Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } Stats::Store& stats() override { return stats_; } 
Init::Manager& initManager() override { ASSERT(init_manager_ != nullptr); diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index 6ab63fb6a51d7..3af9104963206 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -43,6 +43,7 @@ int main(int argc, char** argv) { } TestEnvironment::initializeTestMain(argv[0]); + Thread::TestThread test_thread; // Suppressing non-error messages in benchmark tests. This hides warning // messages that appear when using a runtime feature when there isn't an initialized diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 126088c418d3f..f309fb942a7ee 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -1592,42 +1592,37 @@ name: accesslog } } -// Test that the deprecated extension names still function. +// Test that the deprecated extension names are disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST_F(AccessLogImplTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { - { - envoy::config::accesslog::v3::AccessLog config; - config.set_name("envoy.access_loggers.file"); - - EXPECT_NO_THROW( - Config::Utility::getAndCheckFactory( - config)); - } - { envoy::config::accesslog::v3::AccessLog config; config.set_name("envoy.file_access_log"); - EXPECT_NO_THROW( + EXPECT_THROW( Config::Utility::getAndCheckFactory( - config)); + config), + EnvoyException); } { envoy::config::accesslog::v3::AccessLog config; config.set_name("envoy.http_grpc_access_log"); - EXPECT_NO_THROW( + EXPECT_THROW( Config::Utility::getAndCheckFactory( - config)); + config), + EnvoyException); } { envoy::config::accesslog::v3::AccessLog config; config.set_name("envoy.tcp_grpc_access_log"); - EXPECT_NO_THROW( + EXPECT_THROW( Config::Utility::getAndCheckFactory( - config)); + config), + EnvoyException); } } diff --git a/test/common/config/BUILD 
b/test/common/config/BUILD index 98e4105927875..bc783732c18ce 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -10,12 +10,6 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_cc_test( - name = "api_shadow_test", - srcs = ["api_shadow_test.cc"], - deps = ["@envoy_api//envoy/config/cluster/v3:pkg_cc_proto"], -) - envoy_cc_test( name = "decoded_resource_impl_test", srcs = ["decoded_resource_impl_test.cc"], @@ -492,9 +486,9 @@ envoy_cc_test( deps = [ "//source/common/common:empty_string", "//source/common/config:datasource_lib", + "//source/common/crypto:utility_lib", "//source/common/http:message_lib", "//source/common/protobuf:utility_lib", - "//source/extensions/common/crypto:utility_lib", "//test/mocks/event:event_mocks", "//test/mocks/init:init_mocks", "//test/mocks/runtime:runtime_mocks", diff --git a/test/common/config/api_shadow_test.cc b/test/common/config/api_shadow_test.cc deleted file mode 100644 index 2f4936ad5f813..0000000000000 --- a/test/common/config/api_shadow_test.cc +++ /dev/null @@ -1,20 +0,0 @@ -#include "envoy/config/cluster/v3/cluster.pb.h" - -#include "gtest/gtest.h" - -namespace Envoy { -namespace Config { -namespace { - -// Validate that deprecated fields are accessible via the shadow protos. -TEST(ApiShadowTest, All) { - envoy::config::cluster::v3::Cluster cluster; - - cluster.mutable_hidden_envoy_deprecated_tls_context(); - cluster.set_lb_policy( - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); -} - -} // namespace -} // namespace Config -} // namespace Envoy diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 5c816a194b723..b8041de85b6b4 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -183,6 +183,50 @@ TEST_P(DeltaSubscriptionStateTest, SubscribeAndUnsubscribe) { } } +// Resources has no subscriptions should not be tracked. 
+TEST_P(DeltaSubscriptionStateTest, NewPushDoesntAddUntrackedResources) { + { // Add "name4", "name5", "name6" and remove "name1", "name2", "name3". + updateSubscriptionInterest({"name4", "name5", "name6"}, {"name1", "name2", "name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + } + { + // On Reconnection, only "name4", "name5", "name6" are sent. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } + // The xDS server's first response includes removed items name1 and 2, and a + // completely unrelated resource "bluhbluh". + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, + {"bluhbluh", "bluh"}, + {"name6", "version6A"}, + {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + { // Simulate a stream reconnection, just to see the current resource_state_. 
+ markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + ASSERT_EQ(cur_request->initial_resource_versions().size(), 1); + EXPECT_TRUE(cur_request->initial_resource_versions().contains("name6")); + EXPECT_EQ(cur_request->initial_resource_versions().at("name6"), "version6A"); + } +} + // Delta xDS reliably queues up and sends all discovery requests, even in situations where it isn't // strictly necessary. E.g.: if you subscribe but then unsubscribe to a given resource, all before a // request was able to be sent, two requests will be sent. The following tests demonstrate this. @@ -425,6 +469,50 @@ TEST_P(WildcardDeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); } +// All resources from the server should be tracked. +TEST_P(WildcardDeltaSubscriptionStateTest, AllResourcesFromServerAreTrackedInWildcardXDS) { + { // Add "name4", "name5", "name6" and remove "name1", "name2", "name3". + updateSubscriptionInterest({"name4", "name5", "name6"}, {"name1", "name2", "name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + } + { + // On Reconnection, only "name4", "name5", "name6" are sent. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } + // The xDS server's first response includes removed items name1 and 2, and a + // completely unrelated resource "bluhbluh". 
+ { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, + {"bluhbluh", "bluh"}, + {"name6", "version6A"}, + {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + { // Simulate a stream reconnection, just to see the current resource_state_. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + ASSERT_EQ(cur_request->initial_resource_versions().size(), 4); + EXPECT_EQ(cur_request->initial_resource_versions().at("name1"), "version1A"); + EXPECT_EQ(cur_request->initial_resource_versions().at("bluhbluh"), "bluh"); + EXPECT_EQ(cur_request->initial_resource_versions().at("name6"), "version6A"); + EXPECT_EQ(cur_request->initial_resource_versions().at("name2"), "version2A"); + } +} + // initial_resource_versions should not be present on messages after the first in a stream. TEST_P(DeltaSubscriptionStateTest, InitialVersionMapFirstMessageOnly) { // First, verify that the first message of a new stream sends initial versions. 
diff --git a/test/common/config/registry_test.cc b/test/common/config/registry_test.cc index 53d6979a42a69..9bc9c434e8ad7 100644 --- a/test/common/config/registry_test.cc +++ b/test/common/config/registry_test.cc @@ -91,18 +91,10 @@ class TestWithDeprecatedPublishedFactory : public PublishedFactory { REGISTER_FACTORY(TestWithDeprecatedPublishedFactory, PublishedFactory){"testing.published.deprecated_name"}; +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(RegistryTest, DEPRECATED_FEATURE_TEST(WithDeprecatedFactoryPublished)) { - EXPECT_EQ("testing.published.instead_name", - Envoy::Registry::FactoryRegistry::getFactory( - "testing.published.deprecated_name") - ->name()); - EXPECT_LOG_CONTAINS("warn", - fmt::format("Using deprecated extension name '{}' for '{}'.", - "testing.published.deprecated_name", - "testing.published.instead_name"), - Envoy::Registry::FactoryRegistry::getFactory( - "testing.published.deprecated_name") - ->name()); + EXPECT_EQ(nullptr, Envoy::Registry::FactoryRegistry::getFactory( + "testing.published.deprecated_name")); } class NoNamePublishedFactory : public PublishedFactory { @@ -161,17 +153,9 @@ REGISTER_FACTORY(TestVersionedWithDeprecatedNamesFactory, // Test registration of versioned factory that also uses deprecated names TEST(RegistryTest, DEPRECATED_FEATURE_TEST(VersionedWithDeprecatedNamesFactory)) { - EXPECT_EQ("testing.published.versioned.instead_name", - Envoy::Registry::FactoryRegistry::getFactory( - "testing.published.versioned.deprecated_name") - ->name()); - EXPECT_LOG_CONTAINS("warn", - fmt::format("Using deprecated extension name '{}' for '{}'.", - "testing.published.versioned.deprecated_name", - "testing.published.versioned.instead_name"), - Envoy::Registry::FactoryRegistry::getFactory( - "testing.published.versioned.deprecated_name") - ->name()); + EXPECT_EQ(nullptr, Envoy::Registry::FactoryRegistry::getFactory( + "testing.published.versioned.deprecated_name")); + 
const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories(); auto version = factories.find("testing.published") ->second->getFactoryVersion("testing.published.versioned.instead_name"); diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 48c263af84c80..cd46df99cdb6b 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -341,6 +341,18 @@ TEST_F(SubscriptionFactoryTest, GrpcCollectionSubscriptionUnsupportedApiType) { EnvoyException, "Unknown xdstp:// transport API type in api_type: GRPC"); } +TEST_F(SubscriptionFactoryTest, GrpcCollectionSubscriptionUnsupportedConfigSpecifierType) { + envoy::config::core::v3::ConfigSource config; + config.set_path("/path/foo/bar"); + EXPECT_THROW_WITH_REGEX( + collectionSubscriptionFromUrl( + "xdstp://foo/envoy.config.endpoint.v3.ClusterLoadAssignment/bar", config) + ->start({}), + EnvoyException, + "Missing or not supported config source specifier in envoy::config::core::v3::ConfigSource " + "for a collection. Only ADS and gRPC in delta-xDS mode are supported."); +} + TEST_F(SubscriptionFactoryTest, GrpcCollectionAggregatedSubscription) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); diff --git a/test/common/config/type_to_endpoint_test.cc b/test/common/config/type_to_endpoint_test.cc index 0a1d877bb6131..4785f35f8b4c2 100644 --- a/test/common/config/type_to_endpoint_test.cc +++ b/test/common/config/type_to_endpoint_test.cc @@ -20,34 +20,26 @@ TEST(TypeToEndpoint, All) { envoy::service::route::v3::RdsDummy _v3_rds_dummy; // Delta gRPC endpoints. 
- EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::AUTO) - .full_name()); - EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V3) - .full_name()); + EXPECT_EQ( + "envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + EXPECT_EQ( + "envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); // SotW gRPC endpoints. - EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::AUTO) - .full_name()); - EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V3) - .full_name()); + EXPECT_EQ( + "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + EXPECT_EQ( + "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); // REST endpoints. 
EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::AUTO) - .full_name()); + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V3) - .full_name()); + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); } } // namespace diff --git a/test/common/crypto/BUILD b/test/common/crypto/BUILD index a7243b2309f04..a3d8272da24e2 100644 --- a/test/common/crypto/BUILD +++ b/test/common/crypto/BUILD @@ -22,7 +22,6 @@ envoy_cc_test( "//source/common/buffer:buffer_lib", "//source/common/common:hex_lib", "//source/common/crypto:utility_lib", - "//source/extensions/common/crypto:utility_lib", ], ) @@ -35,7 +34,7 @@ envoy_cc_fuzz_test( name = "get_sha_256_digest_fuzz_test", srcs = ["get_sha_256_digest_fuzz_test.cc"], corpus = "get_sha_256_digest_corpus", - deps = ["//source/extensions/common/crypto:utility_lib"], + deps = ["//source/common/crypto:utility_lib"], ) envoy_cc_fuzz_test( @@ -47,6 +46,5 @@ envoy_cc_fuzz_test( ":verify_signature_fuzz_proto_cc_proto", "//source/common/common:hex_lib", "//source/common/crypto:utility_lib", - "//source/extensions/common/crypto:utility_lib", ], ) diff --git a/test/common/crypto/utility_test.cc b/test/common/crypto/utility_test.cc index e30dda5bf7679..46eabc6b03cb8 100644 --- a/test/common/crypto/utility_test.cc +++ b/test/common/crypto/utility_test.cc @@ -1,7 +1,7 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/hex.h" +#include "source/common/crypto/crypto_impl.h" #include "source/common/crypto/utility.h" -#include "source/extensions/common/crypto/crypto_impl.h" #include "gtest/gtest.h" diff --git a/test/common/filter/BUILD 
b/test/common/filter/BUILD index 4d9b44b7f211f..1bc00f1e8c84b 100644 --- a/test/common/filter/BUILD +++ b/test/common/filter/BUILD @@ -15,8 +15,8 @@ envoy_cc_test( "//source/common/config:utility_lib", "//source/common/filter:config_discovery_lib", "//source/common/json:json_loader_lib", - "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/http/router:config", + "//test/integration/filters:add_body_filter_config_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/server:server_mocks", diff --git a/test/common/filter/config_discovery_impl_test.cc b/test/common/filter/config_discovery_impl_test.cc index ede45f2577257..7eebb2b237a02 100644 --- a/test/common/filter/config_discovery_impl_test.cc +++ b/test/common/filter/config_discovery_impl_test.cc @@ -361,8 +361,8 @@ TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig name: foo typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + body_size: 10 )EOF"; const auto response = TestUtility::parseYaml(response_yaml); @@ -372,8 +372,7 @@ TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { EXPECT_THROW_WITH_MESSAGE( callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), EnvoyException, - "Error: filter config has type URL envoy.extensions.filters.http.health_check.v3.HealthCheck " - "but " + "Error: filter config has type URL test.integration.filters.AddBodyFilterConfig but " "expect envoy.extensions.filters.http.router.v3.Router."); EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); } diff --git a/test/common/grpc/async_client_manager_impl_test.cc b/test/common/grpc/async_client_manager_impl_test.cc index fc8a365f98f9c..c0fe6baddae8d 
100644 --- a/test/common/grpc/async_client_manager_impl_test.cc +++ b/test/common/grpc/async_client_manager_impl_test.cc @@ -39,14 +39,7 @@ class AsyncClientManagerImplTest : public testing::Test { TEST_F(AsyncClientManagerImplTest, EnvoyGrpcOk) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - - Upstream::ClusterManager::ClusterInfoMaps cluster_maps; - Upstream::MockClusterMockPrioritySet cluster; - cluster_maps.active_clusters_.emplace("foo", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_maps)); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()); - + EXPECT_CALL(cm_, checkActiveStaticCluster("foo")).WillOnce(Return()); async_client_manager_.factoryForGrpcService(grpc_service, scope_, false); } @@ -89,30 +82,15 @@ TEST_F(AsyncClientManagerImplTest, EnableRawAsyncClientCache) { EXPECT_NE(foo_client1.get(), bar_client.get()); } -TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknown) { - envoy::config::core::v3::GrpcService grpc_service; - grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - - EXPECT_CALL(cm_, clusters()); - EXPECT_THROW_WITH_MESSAGE( - async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, - "Unknown gRPC client cluster 'foo'"); -} - -TEST_F(AsyncClientManagerImplTest, EnvoyGrpcDynamicCluster) { +TEST_F(AsyncClientManagerImplTest, EnvoyGrpcInvalid) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo", cluster); - EXPECT_CALL(cm_, clusters()) - .WillOnce(Return(Upstream::ClusterManager::ClusterInfoMaps{cluster_map, {}})); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); + EXPECT_CALL(cm_, checkActiveStaticCluster("foo")).WillOnce(Invoke([](const std::string&) 
{ + throw EnvoyException("fake exception"); + })); EXPECT_THROW_WITH_MESSAGE( async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, - "gRPC client cluster 'foo' is not static"); + "fake exception"); } TEST_F(AsyncClientManagerImplTest, GoogleGrpc) { @@ -187,11 +165,11 @@ TEST_F(AsyncClientManagerImplTest, GoogleGrpcIllegalCharsInValue) { #endif } -TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknownOk) { +TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknownSkipClusterCheck) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - EXPECT_CALL(cm_, clusters()).Times(0); + EXPECT_CALL(cm_, checkActiveStaticCluster(_)).Times(0); ASSERT_NO_THROW(async_client_manager_.factoryForGrpcService(grpc_service, scope_, true)); } diff --git a/test/common/http/BUILD b/test/common/http/BUILD index edd7e75664edf..17512c3753da6 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -446,6 +446,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/server:transport_socket_factory_context_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", "//source/common/quic:quic_factory_lib", "//source/common/quic:quic_transport_socket_factory_lib", "//source/common/quic:client_connection_factory_lib", @@ -481,6 +482,7 @@ envoy_cc_test( "//source/common/http:alternate_protocols_cache", "//source/common/singleton:manager_impl_lib", "//test/mocks:common_lib", + "//test/mocks/server:factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", ], diff --git a/test/common/http/alternate_protocols_cache_impl_test.cc b/test/common/http/alternate_protocols_cache_impl_test.cc index 4abc98ddb5be4..bc47f7ca27f2c 100644 --- a/test/common/http/alternate_protocols_cache_impl_test.cc +++ b/test/common/http/alternate_protocols_cache_impl_test.cc @@ -1,18 +1,31 @@ #include 
"source/common/http/alternate_protocols_cache_impl.h" +#include "test/mocks/common.h" #include "test/test_common/simulated_time_system.h" #include "gtest/gtest.h" +using testing::Invoke; +using testing::NiceMock; + namespace Envoy { namespace Http { namespace { class AlternateProtocolsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedTime { public: - AlternateProtocolsCacheImplTest() : protocols_(simTime()) {} + AlternateProtocolsCacheImplTest() : store_(new NiceMock()) {} + + void initialize() { + protocols_ = std::make_unique( + simTime(), std::unique_ptr(store_), max_entries_); + } + + const size_t max_entries_ = 10; + + MockKeyValueStore* store_; + std::unique_ptr protocols_; - AlternateProtocolsCacheImpl protocols_; const std::string hostname1_ = "hostname1"; const std::string hostname2_ = "hostname2"; const uint32_t port1_ = 1; @@ -29,93 +42,222 @@ class AlternateProtocolsCacheImplTest : public testing::Test, public Event::Test const AlternateProtocolsCacheImpl::Origin origin1_ = {https_, hostname1_, port1_}; const AlternateProtocolsCacheImpl::Origin origin2_ = {https_, hostname2_, port2_}; - const AlternateProtocolsCacheImpl::AlternateProtocol protocol1_ = {alpn1_, hostname1_, port1_, - expiration1_}; - const AlternateProtocolsCacheImpl::AlternateProtocol protocol2_ = {alpn2_, hostname2_, port2_, - expiration2_}; + AlternateProtocolsCacheImpl::AlternateProtocol protocol1_ = {alpn1_, hostname1_, port1_, + expiration1_}; + AlternateProtocolsCacheImpl::AlternateProtocol protocol2_ = {alpn2_, hostname2_, port2_, + expiration2_}; - const std::vector protocols1_ = {protocol1_}; - const std::vector protocols2_ = {protocol2_}; + std::vector protocols1_ = {protocol1_}; + std::vector protocols2_ = {protocol2_}; }; -TEST_F(AlternateProtocolsCacheImplTest, Init) { EXPECT_EQ(0, protocols_.size()); } +TEST_F(AlternateProtocolsCacheImplTest, Init) { + initialize(); + EXPECT_EQ(0, protocols_->size()); +} TEST_F(AlternateProtocolsCacheImplTest, 
SetAlternatives) { - EXPECT_EQ(0, protocols_.size()); - protocols_.setAlternatives(origin1_, protocols1_); - EXPECT_EQ(1, protocols_.size()); + initialize(); + EXPECT_EQ(0, protocols_->size()); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); + protocols_->setAlternatives(origin1_, protocols1_); + EXPECT_EQ(1, protocols_->size()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternatives) { - protocols_.setAlternatives(origin1_, protocols1_); + initialize(); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); + protocols_->setAlternatives(origin1_, protocols1_); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols1_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterReplacement) { - protocols_.setAlternatives(origin1_, protocols1_); - protocols_.setAlternatives(origin1_, protocols2_); + initialize(); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); + protocols_->setAlternatives(origin1_, protocols1_); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn2=\"hostname2:2\"; ma=10")); + protocols_->setAlternatives(origin1_, protocols2_); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols2_, protocols.ref()); EXPECT_NE(protocols1_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesForMultipleOrigins) { - protocols_.setAlternatives(origin1_, protocols1_); - protocols_.setAlternatives(origin2_, protocols2_); + initialize(); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); + protocols_->setAlternatives(origin1_, protocols1_); + EXPECT_CALL(*store_, addOrUpdate("https://hostname2:2", "alpn2=\"hostname2:2\"; ma=10")); + 
protocols_->setAlternatives(origin2_, protocols2_); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols1_, protocols.ref()); - protocols = protocols_.findAlternatives(origin2_); + protocols = protocols_->findAlternatives(origin2_); EXPECT_EQ(protocols2_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterExpiration) { - protocols_.setAlternatives(origin1_, protocols1_); + initialize(); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); + protocols_->setAlternatives(origin1_, protocols1_); simTime().setMonotonicTime(expiration1_ + Seconds(1)); + EXPECT_CALL(*store_, remove("https://hostname1:1")); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_FALSE(protocols.has_value()); - EXPECT_EQ(0, protocols_.size()); + EXPECT_EQ(0, protocols_->size()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterPartialExpiration) { - protocols_.setAlternatives(origin1_, {protocol1_, protocol2_}); + initialize(); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", + "alpn1=\"hostname1:1\"; ma=5,alpn2=\"hostname2:2\"; ma=10")); + std::vector both = {protocol1_, protocol2_}; + protocols_->setAlternatives(origin1_, both); simTime().setMonotonicTime(expiration1_ + Seconds(1)); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn2=\"hostname2:2\"; ma=10")); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols2_.size(), protocols->size()); EXPECT_EQ(protocols2_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterTruncation) { - AlternateProtocolsCacheImpl::AlternateProtocol protocol = protocol1_; - + initialize(); std::vector expected_protocols; for (size_t i = 0; i < 10; ++i) { - 
protocol.port_++; - expected_protocols.push_back(protocol); + protocol1_.port_++; + expected_protocols.push_back(protocol1_); } std::vector full_protocols = expected_protocols; - protocol.port_++; - full_protocols.push_back(protocol); - full_protocols.push_back(protocol); + protocol1_.port_++; + full_protocols.push_back(protocol1_); + full_protocols.push_back(protocol1_); - protocols_.setAlternatives(origin1_, full_protocols); + protocols_->setAlternatives(origin1_, full_protocols); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(10, protocols->size()); EXPECT_EQ(expected_protocols, protocols.ref()); } +TEST_F(AlternateProtocolsCacheImplTest, ToAndFromOriginString) { + initialize(); + std::string origin_str = "https://hostname1:1"; + absl::optional origin = + AlternateProtocolsCacheImpl::stringToOrigin(origin_str); + ASSERT_TRUE(origin.has_value()); + EXPECT_EQ(1, origin.value().port_); + EXPECT_EQ("https", origin.value().scheme_); + EXPECT_EQ("hostname1", origin.value().hostname_); + std::string output = AlternateProtocolsCacheImpl::originToString(origin.value()); + EXPECT_EQ(origin_str, output); + + // Test with no scheme or port. + std::string origin_str2 = "://:1"; + absl::optional origin2 = + AlternateProtocolsCacheImpl::stringToOrigin(origin_str2); + ASSERT_TRUE(origin2.has_value()); + EXPECT_EQ(1, origin2.value().port_); + EXPECT_EQ("", origin2.value().scheme_); + EXPECT_EQ("", origin2.value().hostname_); + std::string output2 = AlternateProtocolsCacheImpl::originToString(origin2.value()); + EXPECT_EQ(origin_str2, output2); + + // No port. + EXPECT_TRUE(!AlternateProtocolsCacheImpl::stringToOrigin("https://").has_value()); + // Non-numeric port. + EXPECT_TRUE(!AlternateProtocolsCacheImpl::stringToOrigin("://asd:dsa").has_value()); + // Negative port. 
+ EXPECT_TRUE(!AlternateProtocolsCacheImpl::stringToOrigin("https://:-1").has_value()); +} + +TEST_F(AlternateProtocolsCacheImplTest, MaxEntries) { + initialize(); + EXPECT_EQ(0, protocols_->size()); + const std::string hostname = "hostname"; + for (uint32_t i = 0; i <= max_entries_; ++i) { + const AlternateProtocolsCache::Origin origin = {https_, hostname, i}; + AlternateProtocolsCache::AlternateProtocol protocol = {alpn1_, hostname, i, expiration1_}; + std::vector protocols = {protocol}; + EXPECT_CALL(*store_, addOrUpdate(absl::StrCat("https://hostname:", i), + absl::StrCat("alpn1=\"hostname:", i, "\"; ma=5"))); + if (i == max_entries_) { + EXPECT_CALL(*store_, remove("https://hostname:0")); + } + protocols_->setAlternatives(origin, protocols); + } +} + +TEST_F(AlternateProtocolsCacheImplTest, ToAndFromString) { + initialize(); + auto testAltSvc = [&](const std::string& original_alt_svc, + const std::string& expected_alt_svc) -> void { + absl::optional> protocols = + AlternateProtocolsCacheImpl::protocolsFromString(original_alt_svc, simTime(), true); + ASSERT(protocols.has_value()); + ASSERT_GE(protocols.value().size(), 1); + + AlternateProtocolsCache::AlternateProtocol& protocol = protocols.value()[0]; + EXPECT_EQ("h3-29", protocol.alpn_); + EXPECT_EQ("", protocol.hostname_); + EXPECT_EQ(443, protocol.port_); + auto duration = std::chrono::duration_cast(protocol.expiration_ - + simTime().monotonicTime()); + EXPECT_EQ(86400, duration.count()); + + if (protocols.value().size() == 2) { + AlternateProtocolsCache::AlternateProtocol& protocol2 = protocols.value()[1]; + EXPECT_EQ("h3", protocol2.alpn_); + EXPECT_EQ("", protocol2.hostname_); + EXPECT_EQ(443, protocol2.port_); + duration = std::chrono::duration_cast(protocol2.expiration_ - + simTime().monotonicTime()); + EXPECT_EQ(60, duration.count()); + } + + std::string alt_svc = + AlternateProtocolsCacheImpl::protocolsToStringForCache(protocols.value(), simTime()); + EXPECT_EQ(expected_alt_svc, alt_svc); + }; + + 
testAltSvc("h3-29=\":443\"; ma=86400", "h3-29=\":443\"; ma=86400"); + testAltSvc("h3-29=\":443\"; ma=86400,h3=\":443\"; ma=60", + "h3-29=\":443\"; ma=86400,h3=\":443\"; ma=60"); + + // Test once more to make sure we handle time advancing correctly. + // the absolute expiration time in testAltSvc is expected to be 86400 so add + // 60s to the default max age. + simTime().setMonotonicTime(simTime().monotonicTime() + std::chrono::seconds(60)); + testAltSvc("h3-29=\":443\"; ma=86460", "h3-29=\":443\"; ma=86460"); +} + +TEST_F(AlternateProtocolsCacheImplTest, CacheLoad) { + EXPECT_CALL(*store_, iterate(_)).WillOnce(Invoke([&](KeyValueStore::ConstIterateCb fn) { + fn("foo", "bar"); + fn("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5"); + })); + + // When the cache is created, there should be a warning log for the bad cache + // entry. + EXPECT_LOG_CONTAINS("warn", "Unable to parse cache entry with key: foo value: bar", + { initialize(); }); + + EXPECT_CALL(*store_, addOrUpdate(_, _)).Times(0); + OptRef> protocols = + protocols_->findAlternatives(origin1_); + ASSERT_TRUE(protocols.has_value()); + EXPECT_EQ(protocols1_, protocols.ref()); +} + } // namespace } // namespace Http } // namespace Envoy diff --git a/test/common/http/alternate_protocols_cache_manager_test.cc b/test/common/http/alternate_protocols_cache_manager_test.cc index c26570eac76fa..7fa9543c6cc01 100644 --- a/test/common/http/alternate_protocols_cache_manager_test.cc +++ b/test/common/http/alternate_protocols_cache_manager_test.cc @@ -1,11 +1,14 @@ #include "source/common/http/alternate_protocols_cache_manager_impl.h" #include "source/common/singleton/manager_impl.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" #include "gtest/gtest.h" +using testing::Return; + namespace Envoy { namespace Http { @@ -13,51 +16,71 @@ namespace { class AlternateProtocolsCacheManagerTest : public testing::Test, public 
Event::TestUsingSimulatedTime { public: - AlternateProtocolsCacheManagerTest() - : factory_(singleton_manager_, simTime(), tls_), manager_(factory_.get()) { + AlternateProtocolsCacheManagerTest() { options1_.set_name(name1_); options1_.mutable_max_entries()->set_value(max_entries1_); options2_.set_name(name2_); options2_.mutable_max_entries()->set_value(max_entries2_); } + void initialize() { + AlternateProtocolsData data(context_); + factory_ = std::make_unique(singleton_manager_, + tls_, data); + manager_ = factory_->get(); + } Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock context_; testing::NiceMock tls_; - Http::AlternateProtocolsCacheManagerFactoryImpl factory_; + std::unique_ptr factory_; AlternateProtocolsCacheManagerSharedPtr manager_; const std::string name1_ = "name1"; const std::string name2_ = "name2"; const int max_entries1_ = 10; const int max_entries2_ = 20; + Event::MockDispatcher dispatcher_; envoy::config::core::v3::AlternateProtocolsCacheOptions options1_; envoy::config::core::v3::AlternateProtocolsCacheOptions options2_; }; TEST_F(AlternateProtocolsCacheManagerTest, FactoryGet) { + initialize(); + EXPECT_NE(nullptr, manager_); - EXPECT_EQ(manager_, factory_.get()); + EXPECT_EQ(manager_, factory_->get()); } TEST_F(AlternateProtocolsCacheManagerTest, GetCache) { - AlternateProtocolsCacheSharedPtr cache = manager_->getCache(options1_); + initialize(); + AlternateProtocolsCacheSharedPtr cache = manager_->getCache(options1_, dispatcher_); EXPECT_NE(nullptr, cache); - EXPECT_EQ(cache, manager_->getCache(options1_)); + EXPECT_EQ(cache, manager_->getCache(options1_, dispatcher_)); +} + +TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) { + EXPECT_CALL(context_.options_, concurrency()).WillOnce(Return(5)); + options1_.mutable_key_value_store_config(); + initialize(); + EXPECT_THROW_WITH_REGEX(manager_->getCache(options1_, dispatcher_), EnvoyException, + "options has key value store but 
Envoy has concurrency = 5"); } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForDifferentOptions) { - AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); - AlternateProtocolsCacheSharedPtr cache2 = manager_->getCache(options2_); + initialize(); + AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_, dispatcher_); + AlternateProtocolsCacheSharedPtr cache2 = manager_->getCache(options2_, dispatcher_); EXPECT_NE(nullptr, cache2); EXPECT_NE(cache1, cache2); } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForConflictingOptions) { - AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); + initialize(); + AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_, dispatcher_); options2_.set_name(options1_.name()); EXPECT_THROW_WITH_REGEX( - manager_->getCache(options2_), EnvoyException, + manager_->getCache(options2_, dispatcher_), EnvoyException, "options specified alternate protocols cache 'name1' with different settings.*"); } diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 5728e0fa31ada..28d78009c2fdb 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -1544,10 +1544,10 @@ TEST_F(AsyncClientImplTest, DumpState) { } // namespace // Must not be in anonymous namespace for friend to work. 
-class AsyncClientImplUnitTest : public testing::Test { +class AsyncClientImplUnitTest : public AsyncClientImplTest { public: std::unique_ptr route_impl_{new AsyncStreamImpl::RouteImpl( - "foo", absl::nullopt, + client_, absl::nullopt, Protobuf::RepeatedPtrField(), absl::nullopt)}; AsyncStreamImpl::NullVirtualHost vhost_; @@ -1559,7 +1559,7 @@ class AsyncClientImplUnitTest : public testing::Test { TestUtility::loadFromYaml(yaml_config, retry_policy); route_impl_ = std::make_unique( - "foo", absl::nullopt, + client_, absl::nullopt, Protobuf::RepeatedPtrField(), std::move(retry_policy)); } @@ -1567,7 +1567,6 @@ class AsyncClientImplUnitTest : public testing::Test { // Test the extended fake route that AsyncClient uses. TEST_F(AsyncClientImplUnitTest, NullRouteImplInitTest) { - auto& route_entry = *(route_impl_->routeEntry()); EXPECT_EQ(nullptr, route_impl_->decorator()); @@ -1598,7 +1597,6 @@ TEST_F(AsyncClientImplUnitTest, NullRouteImplInitTest) { } TEST_F(AsyncClientImplUnitTest, RouteImplInitTestWithRetryPolicy) { - const std::string yaml = R"EOF( per_try_timeout: 30s num_retries: 10 diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index 873a813be4501..69528416002db 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -14,6 +14,7 @@ #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -97,11 +98,11 @@ class ConnectivityGridForTest : public ConnectivityGrid { }; namespace { -class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public testing::Test { +class ConnectivityGridTest : public Event::TestUsingSimulatedTime, public testing::Test { public: - ConnectivityGridTestBase(bool use_alternate_protocols) + ConnectivityGridTest() : 
options_({Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3}), - alternate_protocols_(maybeCreateAlternateProtocolsCacheImpl(use_alternate_protocols)), + alternate_protocols_(std::make_shared(simTime(), nullptr, 10)), quic_stat_names_(store_.symbolTable()), grid_(dispatcher_, random_, Upstream::makeTestHost(cluster_, "hostname", "tcp://127.0.0.1:9000", simTime()), @@ -119,12 +120,12 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te if (!use_alternate_protocols) { return nullptr; } - return std::make_shared(simTime()); + return std::make_shared(simTime(), nullptr, 10); } void addHttp3AlternateProtocol() { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); } @@ -149,25 +150,12 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te NiceMock encoder_; }; -// Tests of the Grid in which no alternate protocols cache is configured. -class ConnectivityGridTest : public ConnectivityGridTestBase { -public: - ConnectivityGridTest() : ConnectivityGridTestBase(false) {} -}; - -// Tests of the Grid in which an alternate protocols cache is configured. -class ConnectivityGridWithAlternateProtocolsCacheImplTest : public ConnectivityGridTestBase { -public: - ConnectivityGridWithAlternateProtocolsCacheImplTest() : ConnectivityGridTestBase(true) {} -}; - // Test the first pool successfully connecting. TEST_F(ConnectivityGridTest, Success) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); - EXPECT_LOG_CONTAINS("trace", - "No alternate protocols cache. 
Attempting HTTP/3 to host 'hostname'.", - EXPECT_NE(grid_.newStream(decoder_, callbacks_), nullptr)); + EXPECT_NE(grid_.newStream(decoder_, callbacks_), nullptr); EXPECT_NE(grid_.first(), nullptr); EXPECT_EQ(grid_.second(), nullptr); @@ -180,6 +168,7 @@ TEST_F(ConnectivityGridTest, Success) { // Test the first pool successfully connecting under the stack of newStream. TEST_F(ConnectivityGridTest, ImmediateSuccess) { + addHttp3AlternateProtocol(); grid_.immediate_success_ = true; EXPECT_CALL(callbacks_.pool_ready_, ready()); @@ -190,6 +179,7 @@ TEST_F(ConnectivityGridTest, ImmediateSuccess) { // Test the first pool failing and the second connecting. TEST_F(ConnectivityGridTest, FailureThenSuccessSerial) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); EXPECT_LOG_CONTAINS("trace", "first pool attempting to create a new stream to host 'hostname'", @@ -219,6 +209,7 @@ TEST_F(ConnectivityGridTest, FailureThenSuccessSerial) { // Test both connections happening in parallel and the second connecting. TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnects) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -249,6 +240,7 @@ TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnects) { // Test both connections happening in parallel and the first connecting. TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelFirstConnects) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -278,6 +270,7 @@ TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelFirstConnects) { // Test both connections happening in parallel and the second connecting before // the first eventually fails. 
TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnectsFirstFail) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -309,6 +302,7 @@ TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnectsFirstFail) // Test that after the first pool fails, subsequent connections will // successfully fail over to the second pool (the iterators work as intended) TEST_F(ConnectivityGridTest, FailureThenSuccessForMultipleConnectionsSerial) { + addHttp3AlternateProtocol(); NiceMock callbacks2; NiceMock decoder2; // Kick off two new streams. @@ -334,6 +328,7 @@ TEST_F(ConnectivityGridTest, FailureThenSuccessForMultipleConnectionsSerial) { // Test double failure under the stack of newStream. TEST_F(ConnectivityGridTest, ImmediateDoubleFailure) { + addHttp3AlternateProtocol(); grid_.immediate_failure_ = true; EXPECT_CALL(callbacks_.pool_failure_, ready()); EXPECT_EQ(grid_.newStream(decoder_, callbacks_), nullptr); @@ -342,6 +337,7 @@ TEST_F(ConnectivityGridTest, ImmediateDoubleFailure) { // Test both connections happening in parallel and both failing. TEST_F(ConnectivityGridTest, TimeoutDoubleFailureParallel) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -370,6 +366,7 @@ TEST_F(ConnectivityGridTest, TimeoutDoubleFailureParallel) { // Test cancellation TEST_F(ConnectivityGridTest, TestCancel) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); auto cancel = grid_.newStream(decoder_, callbacks_); @@ -382,6 +379,7 @@ TEST_F(ConnectivityGridTest, TestCancel) { // Make sure drains get sent to all active pools. TEST_F(ConnectivityGridTest, Drain) { + addHttp3AlternateProtocol(); grid_.drainConnections(Envoy::ConnectionPool::DrainBehavior::DrainExistingConnections); // Synthetically create a pool. 
@@ -404,6 +402,7 @@ TEST_F(ConnectivityGridTest, Drain) { // Make sure drain callbacks work as expected. TEST_F(ConnectivityGridTest, DrainCallbacks) { + addHttp3AlternateProtocol(); // Synthetically create both pools. grid_.createNextPool(); grid_.createNextPool(); @@ -452,6 +451,7 @@ TEST_F(ConnectivityGridTest, DrainCallbacks) { // Make sure idle callbacks work as expected. TEST_F(ConnectivityGridTest, IdleCallbacks) { + addHttp3AlternateProtocol(); // Synthetically create both pools. grid_.createNextPool(); grid_.createNextPool(); @@ -484,6 +484,7 @@ TEST_F(ConnectivityGridTest, IdleCallbacks) { // Ensure drain callbacks aren't called during grid teardown. TEST_F(ConnectivityGridTest, NoDrainOnTeardown) { + addHttp3AlternateProtocol(); grid_.createNextPool(); bool drain_received = false; @@ -499,7 +500,7 @@ TEST_F(ConnectivityGridTest, NoDrainOnTeardown) { } // Test that when HTTP/3 is broken then the HTTP/3 pool is skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessAfterBroken) { +TEST_F(ConnectivityGridTest, SuccessAfterBroken) { addHttp3AlternateProtocol(); grid_.markHttp3Broken(); EXPECT_EQ(grid_.first(), nullptr); @@ -517,7 +518,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessAfterBroken) } // Test the HTTP/3 pool successfully connecting when HTTP/3 is available. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, Success) { +TEST_F(ConnectivityGridTest, SuccessWithAltSvc) { addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); @@ -533,7 +534,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, Success) { } // Test that when HTTP/3 is not available then the HTTP/3 pool is skipped. 
-TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3) { EXPECT_EQ(grid_.first(), nullptr); EXPECT_LOG_CONTAINS("trace", @@ -549,9 +550,9 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3) } // Test that when HTTP/3 is not available then the HTTP/3 pool is skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithExpiredHttp3) { +TEST_F(ConnectivityGridTest, SuccessWithExpiredHttp3) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3-29", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); simTime().setMonotonicTime(simTime().monotonicTime() + Seconds(10)); @@ -572,9 +573,9 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithExpiredHt // Test that when the alternate protocol specifies a different host, then the HTTP/3 pool is // skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3NoMatchingHostname) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingHostname) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3-29", "otherhostname", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); @@ -593,9 +594,9 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3N // Test that when the alternate protocol specifies a different port, then the HTTP/3 pool is // skipped. 
-TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3NoMatchingPort) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingPort) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3-29", "", origin.port_ + 1, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); @@ -613,9 +614,9 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3N } // Test that when the alternate protocol specifies an invalid ALPN, then the HTTP/3 pool is skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3NoMatchingAlpn) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingAlpn) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"http/2", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); @@ -680,6 +681,55 @@ TEST_F(ConnectivityGridTest, RealGrid) { auto optional_it3 = ConnectivityGridForTest::forceCreateNextPool(grid); ASSERT_FALSE(optional_it3.has_value()); } + +TEST_F(ConnectivityGridTest, ConnectionCloseDuringCreation) { + EXPECT_CALL(*cluster_, connectTimeout()).WillRepeatedly(Return(std::chrono::seconds(10))); + + testing::InSequence s; + dispatcher_.allow_null_callback_ = true; + // Set the cluster up to have a quic transport socket. 
+ Envoy::Ssl::ClientContextConfigPtr config(new NiceMock()); + NiceMock factory_context; + Ssl::ClientContextSharedPtr ssl_context(new Ssl::MockClientContext()); + EXPECT_CALL(factory_context.context_manager_, createSslClientContext(_, _, _)) + .WillOnce(Return(ssl_context)); + auto factory = + std::make_unique(std::move(config), factory_context); + factory->initialize(); + ASSERT_FALSE(factory->usesProxyProtocolOptions()); + auto& matcher = + static_cast(*cluster_->transport_socket_matcher_); + EXPECT_CALL(matcher, resolve(_)) + .WillRepeatedly( + Return(Upstream::TransportSocketMatcher::MatchData(*factory, matcher.stats_, "test"))); + + ConnectivityGrid grid(dispatcher_, random_, + Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000", simTime()), + Upstream::ResourcePriority::Default, socket_options_, + transport_socket_options_, state_, simTime(), alternate_protocols_, + std::chrono::milliseconds(300), options_, quic_stat_names_, store_); + + // Create the HTTP/3 pool. + auto optional_it1 = ConnectivityGridForTest::forceCreateNextPool(grid); + ASSERT_TRUE(optional_it1.has_value()); + EXPECT_EQ("HTTP/3", (**optional_it1)->protocolDescription()); + + Api::MockOsSysCalls os_sys_calls; + TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, socket(_, _, _)).WillOnce(Return(Api::SysCallSocketResult{1, 0})); +#if defined(__APPLE__) || defined(WIN32) + EXPECT_CALL(os_sys_calls, setsocketblocking(1, false)) + .WillOnce(Return(Api::SysCallIntResult{1, 0})); +#endif + EXPECT_CALL(os_sys_calls, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{1, 0})); + EXPECT_CALL(os_sys_calls, setsockopt_(_, _, _, _, _)).WillRepeatedly(Return(0)); + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 101})); + + EXPECT_CALL(os_sys_calls, close(1)).WillOnce(Return(Api::SysCallIntResult{0, 0})); + ConnectionPool::Cancellable* cancel = (**optional_it1)->newStream(decoder_, callbacks_); + EXPECT_EQ(nullptr, cancel); +} 
+ #endif } // namespace diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 98c35fae521c1..ce4bb8a7ccd1e 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -423,8 +423,9 @@ TEST_P(HeaderMapImplTest, AllInlineHeaders) { INLINE_REQ_RESP_STRING_HEADERS(TEST_INLINE_STRING_HEADER_FUNCS) } { - // No request trailer O(1) headers. - } { + // No request trailer O(1) headers. + } + { auto header_map = ResponseHeaderMapImpl::create(); INLINE_RESP_STRING_HEADERS(TEST_INLINE_STRING_HEADER_FUNCS) INLINE_REQ_RESP_STRING_HEADERS(TEST_INLINE_STRING_HEADER_FUNCS) diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 7e9ed68e23df8..5951dced3199d 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -886,6 +886,34 @@ TEST(ValidateHeaders, HeaderNameWithUnderscores) { rejected)); } +TEST(ValidateHeaders, Connect) { + { + // Basic connect. + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "foo.com:80"}}; + EXPECT_EQ(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } + { + // Extended connect. + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, + {":authority", "foo.com:80"}, + {":path", "/"}, + {":protocol", "websocket"}}; + EXPECT_EQ(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } + { + // Missing path. + TestRequestHeaderMapImpl headers{ + {":method", "CONNECT"}, {":authority", "foo.com:80"}, {":protocol", "websocket"}}; + EXPECT_NE(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } + { + // Missing protocol. 
+ TestRequestHeaderMapImpl headers{ + {":method", "CONNECT"}, {":authority", "foo.com:80"}, {":path", "/"}}; + EXPECT_NE(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } +} + TEST(ValidateHeaders, ContentLength) { bool should_close_connection; EXPECT_EQ(HeaderUtility::HeaderValidationResult::ACCEPT, diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 433acb4473a11..d70c3be17a8b2 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -2757,7 +2757,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}; + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "host"}}; EXPECT_TRUE(request_encoder.encodeHeaders(headers, true).ok()); // Send response headers @@ -2788,7 +2788,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}; + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "host"}}; EXPECT_TRUE(request_encoder.encodeHeaders(headers, true).ok()); // Send response headers and payload @@ -2807,7 +2807,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}; + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "host"}}; EXPECT_TRUE(request_encoder.encodeHeaders(headers, true).ok()); EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); diff 
--git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 948cd2ff4853e..51c460a4d2f36 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -1015,7 +1015,7 @@ TEST_P(Http2CodecImplTest, IdlePing) { // Advance time past 1s. This time the ping should be sent, and the timeout // alarm enabled. RequestEncoder* request_encoder2 = &client_->newStream(response_decoder_); - client_connection_.dispatcher_.time_system_.advanceTimeAsyncImpl(std::chrono::seconds(2)); + client_connection_.dispatcher_.globalTimeSystem().advanceTimeAsyncImpl(std::chrono::seconds(2)); EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(0); EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); EXPECT_TRUE(request_encoder2->encodeHeaders(request_headers, true).ok()); @@ -3037,6 +3037,7 @@ TEST_P(Http2CodecImplTest, ConnectTest) { TestRequestHeaderMapImpl request_headers; HttpTestUtility::addDefaultHeaders(request_headers); request_headers.setReferenceKey(Headers::get().Method, Http::Headers::get().MethodValues.Connect); + request_headers.setReferenceKey(Headers::get().Protocol, "bytestream"); TestRequestHeaderMapImpl expected_headers; HttpTestUtility::addDefaultHeaders(expected_headers); expected_headers.setReferenceKey(Headers::get().Method, diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 766fd52200b57..e602f2108df0c 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -91,6 +91,69 @@ TEST(HttpUtility, parseQueryString) { "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29")); } +TEST(HttpUtility, stripQueryString) { + EXPECT_EQ(Utility::stripQueryString(HeaderString("/")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/?")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/?x=1")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/?x=1&y=2")), "/"); + 
EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo?")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo?hello=there")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo?hello=there&good=bye")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/?")), "/foo/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/?x=1")), "/foo/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar?")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar?a=b")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar?a=b&b=c")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/")), "/foo/bar/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/?")), "/foo/bar/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/?x=1")), "/foo/bar/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/?x=1&y=2")), "/foo/bar/"); +} + +TEST(HttpUtility, replaceQueryString) { + // Replace with nothing + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/"), Utility::QueryParams()), "/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?"), Utility::QueryParams()), "/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?x=0"), Utility::QueryParams()), "/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a"), Utility::QueryParams()), "/a"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a/"), Utility::QueryParams()), "/a/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a/?y=5"), Utility::QueryParams()), "/a/"); + // Replace with x=1 + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/"), Utility::QueryParams({{"x", "1"}})), + "/?x=1"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?"), Utility::QueryParams({{"x", "1"}})), + "/?x=1"); + 
EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?x=0"), Utility::QueryParams({{"x", "1"}})), + "/?x=1"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a?x=0"), Utility::QueryParams({{"x", "1"}})), + "/a?x=1"); + EXPECT_EQ( + Utility::replaceQueryString(HeaderString("/a/?x=0"), Utility::QueryParams({{"x", "1"}})), + "/a/?x=1"); + // More replacements + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo"), + Utility::QueryParams({{"x", "1"}, {"z", "3"}})), + "/foo?x=1&z=3"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo?z=2"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo?x=1&y=5"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo?y=9"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo?x=1&y=5"); + // More path components + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo/bar?"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo/bar?x=1&y=5"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo/bar?y=9&a=b"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo/bar?x=1&y=5"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo/bar?y=11&z=7"), + Utility::QueryParams({{"a", "b"}, {"x", "1"}, {"y", "5"}})), + "/foo/bar?a=b&x=1&y=5"); +} + TEST(HttpUtility, getResponseStatus) { EXPECT_THROW(Utility::getResponseStatus(TestResponseHeaderMapImpl{}), CodecClientException); EXPECT_EQ(200U, Utility::getResponseStatus(TestResponseHeaderMapImpl{{":status", "200"}})); @@ -1008,6 +1071,37 @@ TEST(HttpUtility, CheckIsIpAddress) { } } +TEST(HttpUtility, TestConvertCoreToRouteRetryPolicy) { + const std::string core_policy = R"( +num_retries: 10 +)"; + + envoy::config::core::v3::RetryPolicy core_retry_policy; + TestUtility::loadFromYaml(core_policy, core_retry_policy); + + const envoy::config::route::v3::RetryPolicy route_retry_policy = + Utility::convertCoreToRouteRetryPolicy(core_retry_policy, + "5xx,gateway-error,connect-failure,reset"); + 
EXPECT_EQ(route_retry_policy.num_retries().value(), 10); + EXPECT_EQ(route_retry_policy.per_try_timeout().seconds(), 10); + EXPECT_EQ(route_retry_policy.retry_back_off().base_interval().seconds(), 1); + EXPECT_EQ(route_retry_policy.retry_back_off().max_interval().seconds(), 10); + EXPECT_EQ(route_retry_policy.retry_on(), "5xx,gateway-error,connect-failure,reset"); + + const std::string core_policy2 = R"( +retry_back_off: + base_interval: 32s + max_interval: 1s +num_retries: 10 +)"; + + envoy::config::core::v3::RetryPolicy core_retry_policy2; + TestUtility::loadFromYaml(core_policy2, core_retry_policy2); + EXPECT_THROW_WITH_MESSAGE(Utility::convertCoreToRouteRetryPolicy(core_retry_policy2, "5xx"), + EnvoyException, + "max_interval must be greater than or equal to the base_interval"); +} + // Validates TE header is stripped if it contains an unsupported value // Also validate the behavior if a nominated header does not exist TEST(HttpUtility, TestTeHeaderGzipTrailersSanitized) { diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 568cf67d26bab..2d32f100eca45 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -181,7 +181,6 @@ envoy_cc_test( "//source/common/tcp_proxy", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", - "//source/extensions/filters/network/ratelimit:ratelimit_lib", "//test/common/upstream:utility_lib", "//test/extensions/filters/common/ratelimit:ratelimit_mocks", "//test/mocks/buffer:buffer_mocks", @@ -193,7 +192,6 @@ envoy_cc_test( "//test/mocks/upstream:host_mocks", "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc b/test/common/network/addr_family_aware_socket_option_impl_test.cc index cdea9a34f4801..f74f2896d0571 
100644 --- a/test/common/network/addr_family_aware_socket_option_impl_test.cc +++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc @@ -23,6 +23,36 @@ class AddrFamilyAwareSocketOptionImplTest : public SocketOptionTest { } }; +// Different values for v4 and v6 +TEST_F(AddrFamilyAwareSocketOptionImplTest, DifferentV4AndV6OptionValue) { + AddrFamilyAwareSocketOptionImpl socket_option{ + envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1, + ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 2}; + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); + testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1, + {envoy::config::core::v3::SocketOption::STATE_PREBIND}); + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); + testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 2, + {envoy::config::core::v3::SocketOption::STATE_PREBIND}); +} + +// Different string values for v4 and v6. +TEST_F(AddrFamilyAwareSocketOptionImplTest, DifferentV4AndV6OptionData) { + AddrFamilyAwareSocketOptionImpl socket_option{ + envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), + "hello", ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), "world"}; + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); + EXPECT_EQ( + "hello", + socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND) + ->value_); + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); + EXPECT_EQ( + "world", + socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND) + ->value_); +} + // We fail to set the option when the underlying setsockopt syscall fails. 
TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { EXPECT_CALL(socket_, ipVersion).WillRepeatedly(testing::Return(absl::nullopt)); diff --git a/test/common/network/apple_dns_impl_test.cc b/test/common/network/apple_dns_impl_test.cc index feb6b0ac89dda..bf9cc87508a8f 100644 --- a/test/common/network/apple_dns_impl_test.cc +++ b/test/common/network/apple_dns_impl_test.cc @@ -75,11 +75,36 @@ class AppleDnsImplTest : public testing::Test { EXPECT_EQ(expected_status, status); if (expected_results) { EXPECT_FALSE(results.empty()); + absl::optional is_v4{}; for (const auto& result : results) { - if (lookup_family == DnsLookupFamily::V4Only) { + switch (lookup_family) { + case DnsLookupFamily::V4Only: EXPECT_NE(nullptr, result.address_->ip()->ipv4()); - } else if (lookup_family == DnsLookupFamily::V6Only) { + break; + case DnsLookupFamily::V6Only: EXPECT_NE(nullptr, result.address_->ip()->ipv6()); + break; + // In CI these modes could return either V4 or V6 with the non-mocked API calls. But + // regardless of the family all returned addresses need to be one _or_ the other. + case DnsLookupFamily::V4Preferred: + case DnsLookupFamily::Auto: + // Set the expectation for subsequent responses based on the first one. 
+ if (!is_v4.has_value()) { + if (result.address_->ip()->ipv4()) { + is_v4 = true; + } else { + is_v4 = false; + } + } + + if (is_v4.value()) { + EXPECT_NE(nullptr, result.address_->ip()->ipv4()); + } else { + EXPECT_NE(nullptr, result.address_->ip()->ipv6()); + } + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; } } } @@ -147,15 +172,25 @@ TEST_F(AppleDnsImplTest, LocalLookup) { dispatcher_->run(Event::Dispatcher::RunType::Block); } -TEST_F(AppleDnsImplTest, DnsIpAddressVersion) { +TEST_F(AppleDnsImplTest, DnsIpAddressVersionAuto) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::Auto, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_F(AppleDnsImplTest, DnsIpAddressVersionV4Preferred) { + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, true)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} +TEST_F(AppleDnsImplTest, DnsIpAddressVersionV4Only) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Only, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); +} +TEST_F(AppleDnsImplTest, DnsIpAddressVersionV6Only) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V6Only, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -189,6 +224,10 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionInvalid) { DnsResolver::ResolutionStatus::Failure, false)); dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_NE(nullptr, resolveWithExpectations("invalidDnsName", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Failure, false)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_NE(nullptr, resolveWithExpectations("invalidDnsName", DnsLookupFamily::V4Only, DnsResolver::ResolutionStatus::Failure, false)); 
dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -295,10 +334,8 @@ class AppleDnsImplFakeApiTest : public testing::Test { Network::Address::Ipv4Instance address(&addr4); absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll( // Have the API call synchronously call the provided callback. WithArgs<5, 6>(Invoke([&](DNSServiceGetAddrInfoReply callback, void* context) -> void { @@ -320,6 +357,82 @@ class AppleDnsImplFakeApiTest : public testing::Test { checkErrorStat(error_code); } + enum AddressType { V4, V6, Both }; + + void fallbackWith(DnsLookupFamily dns_lookup_family, AddressType address_type) { + const std::string hostname = "foo.com"; + sockaddr_in addr4; + addr4.sin_family = AF_INET; + EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &addr4.sin_addr)); + addr4.sin_port = htons(6502); + Network::Address::Ipv4Instance address(&addr4); + + sockaddr_in6 addr6; + addr6.sin6_family = AF_INET6; + EXPECT_EQ(1, inet_pton(AF_INET6, "102:304:506:708:90a:b0c:d0e:f00", &addr6.sin6_addr)); + addr6.sin6_port = 0; + Network::Address::Ipv6Instance address_v6(addr6); + + DNSServiceGetAddrInfoReply reply_callback; + absl::Notification dns_callback_executed; + + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) + .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); + + EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); + EXPECT_CALL(dispatcher_, createFileEvent_(0, _, _, _)) + .WillOnce(Return(new NiceMock)); + + auto query = resolver_->resolve( + hostname, dns_lookup_family, + [&dns_callback_executed, dns_lookup_family, address_type]( + DnsResolver::ResolutionStatus status, 
std::list&& response) -> void { + EXPECT_EQ(DnsResolver::ResolutionStatus::Success, status); + EXPECT_EQ(1, response.size()); + + if (dns_lookup_family == DnsLookupFamily::Auto) { + if (address_type == AddressType::V4) { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); + } else { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv6()); + } + } + + if (dns_lookup_family == DnsLookupFamily::V4Preferred) { + if (address_type == AddressType::V6) { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv6()); + } else { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); + } + } + dns_callback_executed.Notify(); + }); + ASSERT_NE(nullptr, query); + + switch (address_type) { + case V4: + reply_callback(nullptr, kDNSServiceFlagsAdd, 0, kDNSServiceErr_NoError, hostname.c_str(), + address.sockAddr(), 30, query); + break; + case V6: + reply_callback(nullptr, kDNSServiceFlagsAdd, 0, kDNSServiceErr_NoError, hostname.c_str(), + address_v6.sockAddr(), 30, query); + break; + case Both: + reply_callback(nullptr, kDNSServiceFlagsAdd | kDNSServiceFlagsMoreComing, 0, + kDNSServiceErr_NoError, hostname.c_str(), address.sockAddr(), 30, query); + + reply_callback(nullptr, kDNSServiceFlagsAdd, 0, kDNSServiceErr_NoError, hostname.c_str(), + address_v6.sockAddr(), 30, query); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + dns_callback_executed.WaitForNotification(); + } + protected: MockDnsService dns_service_; TestThreadsafeSingletonInjector dns_service_injector_{&dns_service_}; @@ -341,10 +454,8 @@ TEST_F(AppleDnsImplFakeApiTest, ErrorInSocketAccess) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) 
.WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(-1)); @@ -378,10 +489,8 @@ TEST_F(AppleDnsImplFakeApiTest, InvalidFileEvent) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -415,10 +524,8 @@ TEST_F(AppleDnsImplFakeApiTest, ErrorInProcessResult) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -470,10 +577,8 @@ TEST_F(AppleDnsImplFakeApiTest, QuerySynchronousCompletion) { Network::Address::Ipv4Instance address(&addr4); absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll( // Have the API call synchronously call the provided callback. 
WithArgs<5, 6>(Invoke([&](DNSServiceGetAddrInfoReply callback, void* context) -> void { @@ -530,10 +635,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -561,6 +664,30 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { dns_callback_executed.WaitForNotification(); } +TEST_F(AppleDnsImplFakeApiTest, AutoOnlyV6IfBothV6andV4) { + fallbackWith(DnsLookupFamily::Auto, AddressType::Both); +} + +TEST_F(AppleDnsImplFakeApiTest, AutoV6IfOnlyV6) { + fallbackWith(DnsLookupFamily::Auto, AddressType::V6); +} + +TEST_F(AppleDnsImplFakeApiTest, AutoV4IfOnlyV4) { + fallbackWith(DnsLookupFamily::Auto, AddressType::V4); +} + +TEST_F(AppleDnsImplFakeApiTest, V4PreferredOnlyV4IfBothV6andV4) { + fallbackWith(DnsLookupFamily::V4Preferred, AddressType::Both); +} + +TEST_F(AppleDnsImplFakeApiTest, V4PreferredV6IfOnlyV6) { + fallbackWith(DnsLookupFamily::V4Preferred, AddressType::V6); +} + +TEST_F(AppleDnsImplFakeApiTest, V4PreferredV4IfOnlyV4) { + fallbackWith(DnsLookupFamily::V4Preferred, AddressType::V4); +} + TEST_F(AppleDnsImplFakeApiTest, MultipleAddressesSecondOneFails) { const std::string hostname = "foo.com"; sockaddr_in addr4; @@ -572,10 +699,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddressesSecondOneFails) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - 
StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -622,10 +747,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueries) { absl::Notification dns_callback_executed2; // Start first query. - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -693,10 +816,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueriesOneFails) { absl::Notification dns_callback_executed2; // Start first query. 
- EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -757,10 +878,8 @@ TEST_F(AppleDnsImplFakeApiTest, ResultWithOnlyNonAdditiveReplies) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -792,10 +911,8 @@ TEST_F(AppleDnsImplFakeApiTest, ResultWithNullAddress) { Network::Address::Ipv4Instance address(&addr4); DNSServiceGetAddrInfoReply reply_callback; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -829,10 +946,8 @@ TEST_F(AppleDnsImplFakeApiTest, DeallocateOnDestruction) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) 
+ EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll( SaveArg<5>(&reply_callback), WithArgs<0>(Invoke([](DNSServiceRef* ref) -> void { *ref = new _DNSServiceRef_t{}; })), diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index a7169ee003047..b666ded243ef4 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -419,6 +419,7 @@ TEST_P(ConnectionImplTest, SetServerTransportSocketTimeout) { mock_timer->invokeCallback(); EXPECT_THAT(stream_info_.connectionTerminationDetails(), Optional(HasSubstr("transport socket timeout"))); + EXPECT_EQ(server_connection->transportFailureReason(), "connect timeout"); } TEST_P(ConnectionImplTest, SetServerTransportSocketTimeoutAfterConnect) { diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 20251102575f8..0d5afe756ce21 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -796,6 +796,60 @@ TEST_P(DnsImplTest, MultiARecordLookupWithV6) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(DnsImplTest, AutoOnlyV6IfBothV6andV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, AutoV6IfOnlyV6) { + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, AutoV4IfOnlyV4) 
{ + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, V4PreferredOnlyV4IfBothV6andV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, V4PreferredV6IfOnlyV6) { + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, V4PreferredV4IfOnlyV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + // Validate working of cancellation provided by ActiveDnsQuery return. 
TEST_P(DnsImplTest, Cancel) { server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); diff --git a/test/common/network/filter_manager_impl_test.cc b/test/common/network/filter_manager_impl_test.cc index 4016b3ae82a4d..c77086bbb18a2 100644 --- a/test/common/network/filter_manager_impl_test.cc +++ b/test/common/network/filter_manager_impl_test.cc @@ -1,14 +1,12 @@ #include #include -#include "envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h" #include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/network/filter_manager_impl.h" #include "source/common/tcp_proxy/tcp_proxy.h" #include "source/common/upstream/upstream_impl.h" -#include "source/extensions/filters/network/ratelimit/ratelimit.h" #include "test/common/upstream/utility.h" #include "test/extensions/filters/common/ratelimit/mocks.h" @@ -359,77 +357,6 @@ TEST_F(NetworkFilterManagerTest, EndStream) { manager.onWrite(); } -// This is a very important flow so make sure it works correctly in aggregate. 
-TEST_F(NetworkFilterManagerTest, RateLimitAndTcpProxy) { - InSequence s; - NiceMock factory_context; - NiceMock upstream_connection; - NiceMock conn_pool; - FilterManagerImpl manager(connection_, socket_); - - std::string rl_yaml = R"EOF( -domain: foo -descriptors: -- entries: - - key: hello - value: world -stat_prefix: name - )EOF"; - - ON_CALL(factory_context.runtime_loader_.snapshot_, - featureEnabled("ratelimit.tcp_filter_enabled", 100)) - .WillByDefault(Return(true)); - ON_CALL(factory_context.runtime_loader_.snapshot_, - featureEnabled("ratelimit.tcp_filter_enforcing", 100)) - .WillByDefault(Return(true)); - - envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(rl_yaml, proto_config); - - Extensions::NetworkFilters::RateLimitFilter::ConfigSharedPtr rl_config( - new Extensions::NetworkFilters::RateLimitFilter::Config(proto_config, factory_context.scope_, - factory_context.runtime_loader_)); - Extensions::Filters::Common::RateLimit::MockClient* rl_client = - new Extensions::Filters::Common::RateLimit::MockClient(); - manager.addReadFilter(std::make_shared( - rl_config, Extensions::Filters::Common::RateLimit::ClientPtr{rl_client})); - - factory_context.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - tcp_proxy.set_stat_prefix("name"); - tcp_proxy.set_cluster("fake_cluster"); - TcpProxy::ConfigSharedPtr tcp_proxy_config(new TcpProxy::Config(tcp_proxy, factory_context)); - manager.addReadFilter( - std::make_shared(tcp_proxy_config, factory_context.cluster_manager_)); - - Extensions::Filters::Common::RateLimit::RequestCallbacks* request_callbacks{}; - EXPECT_CALL(*rl_client, limit(_, "foo", - testing::ContainerEq( - std::vector{{{{"hello", "world"}}}}), - testing::A(), _)) - .WillOnce(WithArgs<0>( - Invoke([&](Extensions::Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - request_callbacks = 
&callbacks; - }))); - - EXPECT_EQ(manager.initializeReadFilters(), true); - - EXPECT_CALL(factory_context.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) - .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool))); - - request_callbacks->complete(Extensions::Filters::Common::RateLimit::LimitStatus::OK, nullptr, - nullptr, nullptr, "", nullptr); - - conn_pool.poolReady(upstream_connection); - - Buffer::OwnedImpl buffer("hello"); - EXPECT_CALL(upstream_connection, write(BufferEqual(&buffer), _)); - read_buffer_.add("hello"); - manager.onRead(); - - connection_.raiseEvent(ConnectionEvent::RemoteClose); -} - TEST_F(NetworkFilterManagerTest, InjectReadDataToFilterChain) { InSequence s; diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index 8f514e993084c..4ca83f9604494 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -35,16 +35,15 @@ TEST(ConnectionSocketImplTest, LowerCaseRequestedServerName) { template class ListenSocketImplTest : public testing::TestWithParam { + using ListenSocketType = NetworkListenSocket>; + protected: ListenSocketImplTest() : version_(GetParam()) {} const Address::IpVersion version_; template - std::unique_ptr createListenSocketPtr(Args&&... args) { - using NetworkSocketTraitType = NetworkSocketTrait; - - return std::make_unique>( - std::forward(args)...); + std::unique_ptr createListenSocketPtr(Args&&... 
args) { + return std::make_unique(std::forward(args)...); } void testBindSpecificPort() { @@ -76,7 +75,7 @@ class ListenSocketImplTest : public testing::TestWithParam { EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND)) .WillOnce(Return(true)); options->emplace_back(std::move(option)); - std::unique_ptr socket1; + std::unique_ptr socket1; try { socket1 = createListenSocketPtr(addr, options, true); } catch (SocketBindException& e) { @@ -139,6 +138,19 @@ class ListenSocketImplTest : public testing::TestWithParam { EXPECT_GT(socket->connectionInfoProvider().localAddress()->ip()->port(), 0U); EXPECT_EQ(Type, socket->socketType()); } + + // Verify that a listen sockets that do not bind to port can be duplicated and closed. + void testNotBindToPort() { + auto local_address = version_ == Address::IpVersion::v4 ? Utility::getIpv6AnyAddress() + : Utility::getIpv4AnyAddress(); + auto socket = NetworkListenSocket>(local_address, nullptr, + /*bind_to_port=*/false); + auto dup_socket = socket.duplicate(); + EXPECT_FALSE(socket.isOpen()); + EXPECT_FALSE(dup_socket->isOpen()); + socket.close(); + dup_socket->close(); + } }; using ListenSocketImplTestTcp = ListenSocketImplTest; @@ -162,9 +174,23 @@ class TestListenSocket : public ListenSocketImpl { public: TestListenSocket(Address::InstanceConstSharedPtr address) : ListenSocketImpl(std::make_unique(), address) {} + + TestListenSocket(Address::IpVersion ip_version) + : ListenSocketImpl(/*io_handle=*/nullptr, ip_version == Address::IpVersion::v4 + ? 
Utility::getIpv4AnyAddress() + : Utility::getIpv6AnyAddress()) {} Socket::Type socketType() const override { return Socket::Type::Stream; } + + bool isOpen() const override { return ListenSocketImpl::isOpen(); } + void close() override { ListenSocketImpl::close(); } }; +TEST_P(ListenSocketImplTestTcp, NonIoHandleListenSocket) { + TestListenSocket sock(version_); + EXPECT_FALSE(sock.isOpen()); + sock.close(); +} + TEST_P(ListenSocketImplTestTcp, SetLocalAddress) { std::string address_str = "10.1.2.3"; if (version_ == Address::IpVersion::v6) { @@ -228,6 +254,10 @@ TEST_P(ListenSocketImplTestTcp, BindPortZero) { testBindPortZero(); } TEST_P(ListenSocketImplTestUdp, BindPortZero) { testBindPortZero(); } +TEST_P(ListenSocketImplTestTcp, NotBindToPortAccess) { testNotBindToPort(); } + +TEST_P(ListenSocketImplTestUdp, NotBindToPortAccess) { testNotBindToPort(); } + } // namespace } // namespace Network } // namespace Envoy diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index c2736caed50d9..6979b7b7f3fb9 100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -41,6 +41,10 @@ TEST_F(SocketOptionImplTest, HasName) { EXPECT_LOG_CONTAINS( "warning", "Setting SOL_SOCKET/SO_SNDBUF option on socket failed", socket_option.setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); + + std::vector hash_key; + socket_option.hashKey(hash_key); + EXPECT_FALSE(hash_key.empty()); } TEST_F(SocketOptionImplTest, SetOptionSuccessTrue) { diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index 1118273fdbac0..1f61758acb1b2 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -3,6 +3,7 @@ load( "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 @@ -20,10 +21,24 @@ envoy_cc_test( ], ) +envoy_proto_library( + name = "utility_test_protos", + srcs = [ + 
"utility_test_file_wip.proto", + "utility_test_file_wip_2.proto", + "utility_test_message_field_wip.proto", + ], + deps = [ + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", + ], +) + envoy_cc_test( name = "utility_test", srcs = ["utility_test.cc"], deps = [ + ":utility_test_protos_cc_proto", "//source/common/config:api_version_lib", "//source/common/protobuf:utility_lib", "//test/common/stats:stat_test_utility_lib", diff --git a/test/common/protobuf/message_validator_impl_test.cc b/test/common/protobuf/message_validator_impl_test.cc index 73dd2dbcba6b9..18d6d8b81fd29 100644 --- a/test/common/protobuf/message_validator_impl_test.cc +++ b/test/common/protobuf/message_validator_impl_test.cc @@ -23,7 +23,8 @@ TEST(NullValidationVisitorImpl, UnknownField) { // The warning validation visitor logs and bumps stats on unknown fields TEST(WarningValidationVisitorImpl, UnknownField) { Stats::TestUtil::TestStore stats; - Stats::Counter& unknown_counter = stats.counter("counter"); + Stats::Counter& unknown_counter = stats.counter("unknown_counter"); + Stats::Counter& wip_counter = stats.counter("wip_counter"); WarningValidationVisitorImpl warning_validation_visitor; // we want to be executed. EXPECT_FALSE(warning_validation_visitor.skipValidation()); @@ -38,7 +39,7 @@ TEST(WarningValidationVisitorImpl, UnknownField) { warning_validation_visitor.onUnknownField("bar")); // When we set the stats counter, the above increments are transferred. EXPECT_EQ(0, unknown_counter.value()); - warning_validation_visitor.setUnknownCounter(unknown_counter); + warning_validation_visitor.setCounters(unknown_counter, wip_counter); EXPECT_EQ(2, unknown_counter.value()); // A third unknown field is tracked in stats post-initialization. 
EXPECT_LOG_CONTAINS("warn", "Unknown field: baz", diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 3d5875da1b223..c5fe0709e00cf 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -21,6 +21,9 @@ #include "source/common/protobuf/utility.h" #include "source/common/runtime/runtime_impl.h" +#include "test/common/protobuf/utility_test_file_wip.pb.h" +#include "test/common/protobuf/utility_test_file_wip_2.pb.h" +#include "test/common/protobuf/utility_test_message_field_wip.pb.h" #include "test/common/stats/stat_test_utility.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" @@ -45,30 +48,17 @@ using testing::HasSubstr; class RuntimeStatsHelper : public TestScopedRuntime { public: - RuntimeStatsHelper(bool allow_deprecated_v2_api = false) + explicit RuntimeStatsHelper() : runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")), deprecated_feature_seen_since_process_start_( store_.gauge("runtime.deprecated_feature_seen_since_process_start", - Stats::Gauge::ImportMode::NeverImport)) { - if (allow_deprecated_v2_api) { - Runtime::LoaderSingleton::getExisting()->mergeValues({ - {"envoy.test_only.broken_in_production.enable_deprecated_v2_api", "true"}, - {"envoy.features.enable_all_deprecated_features", "true"}, - }); - } - } + Stats::Gauge::ImportMode::NeverImport)) {} Stats::Counter& runtime_deprecated_feature_use_; Stats::Gauge& deprecated_feature_seen_since_process_start_; }; class ProtobufUtilityTest : public testing::Test, protected RuntimeStatsHelper {}; -// TODO(htuch): During/before the v2 removal, cleanup the various examples that explicitly refer to -// v2 API protos and replace with upgrade examples not tie to the concrete API. 
-class ProtobufV2ApiUtilityTest : public testing::Test, protected RuntimeStatsHelper { -public: - ProtobufV2ApiUtilityTest() : RuntimeStatsHelper(true) {} -}; TEST_F(ProtobufUtilityTest, ConvertPercentNaNDouble) { envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; @@ -455,6 +445,37 @@ insensitive_string: This field should not be redacted. EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); } +// Fields that are values in a sensitive map should be redacted. +TEST_F(ProtobufUtilityTest, RedactMap) { + envoy::test::Sensitive actual, expected; + TestUtility::loadFromYaml(R"EOF( +sensitive_string_map: + "a": "b" +sensitive_int_map: + "x": 12345 +insensitive_string_map: + "c": "d" +insensitive_int_map: + "y": 123456 +)EOF", + actual); + + TestUtility::loadFromYaml(R"EOF( +sensitive_string_map: + "a": "[redacted]" +sensitive_int_map: + "x": +insensitive_string_map: + "c": "d" +insensitive_int_map: + "y": 123456 +)EOF", + expected); + + MessageUtil::redact(actual); + EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); +} + // Bytes fields annotated as sensitive should be converted to the ASCII / UTF-8 encoding of the // string "[redacted]". Bytes fields that are neither annotated as sensitive nor contained in a // sensitive message should be left alone. @@ -1333,7 +1354,7 @@ TEST_F(ProtobufUtilityTest, AnyConvertAndValidateFailedValidation) { } // MessageUtility::unpackTo() with the wrong type throws. -TEST_F(ProtobufV2ApiUtilityTest, UnpackToWrongType) { +TEST_F(ProtobufUtilityTest, UnpackToWrongType) { ProtobufWkt::Duration source_duration; source_duration.set_seconds(42); ProtobufWkt::Any source_any; @@ -1560,6 +1581,76 @@ TEST(DurationUtilTest, OutOfRange) { } } +// Verify WIP accounting of the file based annotations. This test uses the strict validator to test +// that code path. 
+TEST_F(ProtobufUtilityTest, MessageInWipFile) { + Stats::TestUtil::TestStore stats; + Stats::Counter& wip_counter = stats.counter("wip_counter"); + ProtobufMessage::StrictValidationVisitorImpl validation_visitor; + + utility_test::file_wip::Foo foo; + EXPECT_LOG_CONTAINS( + "warning", + "message 'utility_test.file_wip.Foo' is contained in proto file " + "'test/common/protobuf/utility_test_file_wip.proto' marked as work-in-progress. API features " + "marked as work-in-progress are not considered stable, are not covered by the threat model, " + "are not supported by the security team, and are subject to breaking changes. Do not use " + "this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(foo, validation_visitor)); + + EXPECT_EQ(0, wip_counter.value()); + validation_visitor.setCounters(wip_counter); + EXPECT_EQ(1, wip_counter.value()); + + utility_test::file_wip_2::Foo foo2; + EXPECT_LOG_CONTAINS( + "warning", + "message 'utility_test.file_wip_2.Foo' is contained in proto file " + "'test/common/protobuf/utility_test_file_wip_2.proto' marked as work-in-progress. API " + "features marked as work-in-progress are not considered stable, are not covered by the " + "threat model, are not supported by the security team, and are subject to breaking changes. " + "Do not use this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(foo2, validation_visitor)); + + EXPECT_EQ(2, wip_counter.value()); +} + +// Verify WIP accounting for message and field annotations. This test uses the warning validator +// to test that code path. 
+TEST_F(ProtobufUtilityTest, MessageWip) { + Stats::TestUtil::TestStore stats; + Stats::Counter& unknown_counter = stats.counter("unknown_counter"); + Stats::Counter& wip_counter = stats.counter("wip_counter"); + ProtobufMessage::WarningValidationVisitorImpl validation_visitor; + + utility_test::message_field_wip::Foo foo; + EXPECT_LOG_CONTAINS( + "warning", + "message 'utility_test.message_field_wip.Foo' is marked as work-in-progress. API features " + "marked as work-in-progress are not considered stable, are not covered by the threat model, " + "are not supported by the security team, and are subject to breaking changes. Do not use " + "this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(foo, validation_visitor)); + + EXPECT_EQ(0, wip_counter.value()); + validation_visitor.setCounters(unknown_counter, wip_counter); + EXPECT_EQ(1, wip_counter.value()); + + utility_test::message_field_wip::Bar bar; + EXPECT_NO_LOGS(MessageUtil::checkForUnexpectedFields(bar, validation_visitor)); + + bar.set_test_field(true); + EXPECT_LOG_CONTAINS( + "warning", + "field 'utility_test.message_field_wip.Bar.test_field' is marked as work-in-progress. API " + "features marked as work-in-progress are not considered stable, are not covered by the " + "threat model, are not supported by the security team, and are subject to breaking changes. 
" + "Do not use this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(bar, validation_visitor)); + + EXPECT_EQ(2, wip_counter.value()); +} + class DeprecatedFieldsTest : public testing::Test, protected RuntimeStatsHelper { protected: void checkForDeprecation(const Protobuf::Message& message) { diff --git a/test/common/protobuf/utility_test_file_wip.proto b/test/common/protobuf/utility_test_file_wip.proto new file mode 100644 index 0000000000000..164f1b20d1aa1 --- /dev/null +++ b/test/common/protobuf/utility_test_file_wip.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package utility_test.file_wip; + +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).work_in_progress = true; + +message Foo { +} diff --git a/test/common/protobuf/utility_test_file_wip_2.proto b/test/common/protobuf/utility_test_file_wip_2.proto new file mode 100644 index 0000000000000..bf31490990917 --- /dev/null +++ b/test/common/protobuf/utility_test_file_wip_2.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package utility_test.file_wip_2; + +import "xds/annotations/v3/status.proto"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +message Foo { +} diff --git a/test/common/protobuf/utility_test_message_field_wip.proto b/test/common/protobuf/utility_test_message_field_wip.proto new file mode 100644 index 0000000000000..f1a2ce1c64798 --- /dev/null +++ b/test/common/protobuf/utility_test_message_field_wip.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package utility_test.message_field_wip; + +import "xds/annotations/v3/status.proto"; + +message Foo { + option (xds.annotations.v3.message_status).work_in_progress = true; +} + +message Bar { + bool test_field = 1 [(xds.annotations.v3.field_status).work_in_progress = true]; +} diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index 06b49bd5cac6a..9f92852df8769 100644 --- 
a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -190,12 +190,12 @@ class ActiveQuicListenerTest : public testing::TestWithParamallocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_decline_server_push_stream, true); - return quic::CurrentSupportedHttp3Versions(); - }()), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + quic_version_([]() { return quic::CurrentSupportedHttp3Versions(); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), 12345)), self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), diff --git a/test/common/quic/envoy_quic_dispatcher_test.cc b/test/common/quic/envoy_quic_dispatcher_test.cc index 06119487b876b..7e7e4fe0e49fa 100644 --- a/test/common/quic/envoy_quic_dispatcher_test.cc +++ b/test/common/quic/envoy_quic_dispatcher_test.cc @@ -303,5 +303,56 @@ TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { processValidChloPacketAndInitializeFilters(true); } +TEST_P(EnvoyQuicDispatcherTest, CloseWithGivenFilterChain) { + Network::MockFilterChainManager filter_chain_manager; + std::shared_ptr read_filter(new Network::MockReadFilter()); + Network::MockConnectionCallbacks network_connection_callbacks; + testing::StrictMock read_total; + testing::StrictMock read_current; + testing::StrictMock write_total; + testing::StrictMock write_current; + + std::vector filter_factory( + {[&](Network::FilterManager& filter_manager) { + filter_manager.addReadFilter(read_filter); + read_filter->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks); + read_filter->callbacks_->connection().setConnectionStats( + {read_total, read_current, write_total, write_current, nullptr, nullptr}); + }}); + EXPECT_CALL(listener_config_, 
filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); + EXPECT_CALL(filter_chain_manager, findFilterChain(_)) + .WillOnce(Return(&proof_source_->filterChain())); + Network::MockTransportSocketFactory transport_socket_factory; + EXPECT_CALL(proof_source_->filterChain(), transportSocketFactory()) + .WillOnce(ReturnRef(transport_socket_factory)); + EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories()) + .WillOnce(ReturnRef(filter_factory)); + EXPECT_CALL(listener_config_, filterChainFactory()); + EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) + .WillOnce(Invoke([](Network::Connection& connection, + const std::vector& filter_factories) { + EXPECT_EQ(1u, filter_factories.size()); + Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); + return true; + })); + EXPECT_CALL(*read_filter, onNewConnection()) + // Stop iteration to avoid calling getRead/WriteBuffer(). + .WillOnce(Return(Network::FilterStatus::StopIteration)); + + quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 + ? quic::QuicIpAddress::Loopback4() + : quic::QuicIpAddress::Loopback6(), + 54321); + // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to + // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be + // processed immediately. 
+ envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); + + processValidChloPacket(peer_addr); + + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + envoy_quic_dispatcher_.closeConnectionsWithFilterChain(&proof_source_->filterChain()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_proof_source_test.cc b/test/common/quic/envoy_quic_proof_source_test.cc index 75230db0f504f..9ef2e3fe54706 100644 --- a/test/common/quic/envoy_quic_proof_source_test.cc +++ b/test/common/quic/envoy_quic_proof_source_test.cc @@ -144,6 +144,7 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { listener_config_.listenerScope(), std::unique_ptr(mock_context_config_)); transport_socket_factory_->initialize(); + EXPECT_CALL(filter_chain_, name()).WillRepeatedly(Return("")); } void expectCertChainAndPrivateKey(const std::string& cert, bool expect_private_key) { @@ -193,8 +194,9 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { TEST_F(EnvoyQuicProofSourceTest, TestGetCerChainAndSignatureAndVerify) { expectCertChainAndPrivateKey(expected_certs_, true); + bool cert_matched_sni; quic::QuicReferenceCountedPointer chain = - proof_source_.GetCertChain(server_address_, client_address_, hostname_); + proof_source_.GetCertChain(server_address_, client_address_, hostname_, &cert_matched_sni); EXPECT_EQ(2, chain->certs.size()); std::string error_details; @@ -216,7 +218,9 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { EXPECT_CALL(listen_socket_, ioHandle()).Times(3); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return nullptr; })); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + bool cert_matched_sni; + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); // Cert not ready. 
EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) @@ -224,7 +228,8 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { EXPECT_CALL(filter_chain_, transportSocketFactory()) .WillOnce(ReturnRef(*transport_socket_factory_)); EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(false)); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); // No certs in config. EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) @@ -242,7 +247,8 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(true)); std::vector> tls_cert_configs{}; EXPECT_CALL(*mock_context_config_, tlsCertificates()).WillOnce(Return(tls_cert_configs)); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); } TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidCert) { @@ -250,7 +256,9 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidCert) { invalid certificate -----END CERTIFICATE-----)"}; expectCertChainAndPrivateKey(invalid_cert, false); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + bool cert_matched_sni; + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); } TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidPublicKeyInCert) { @@ -275,7 +283,9 @@ x96rVeUbRJ/qU4//nNM/XQa9vIAIcTZ0jFhmb0c3R4rmoqqC3vkSDwtaE5yuS5T4 GUy+n0vQNB0cXGzgcGI= -----END CERTIFICATE-----)"}; expectCertChainAndPrivateKey(cert_with_rsa_1024, false); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + bool cert_matched_sni; + EXPECT_EQ(nullptr, 
proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); } TEST_F(EnvoyQuicProofSourceTest, ComputeSignatureFailNoFilterChain) { diff --git a/test/common/quic/envoy_quic_proof_verifier_test.cc b/test/common/quic/envoy_quic_proof_verifier_test.cc index 21b7be3136a7e..304488c49a664 100644 --- a/test/common/quic/envoy_quic_proof_verifier_test.cc +++ b/test/common/quic/envoy_quic_proof_verifier_test.cc @@ -7,6 +7,7 @@ #include "test/mocks/ssl/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/test_time.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -78,7 +79,7 @@ class EnvoyQuicProofVerifierTest : public testing::Test { const std::string empty_string_; const std::vector empty_string_list_; const std::string cert_chain_{quic::test::kTestCertificateChainPem}; - const std::string root_ca_cert_; + std::string root_ca_cert_; const std::string leaf_cert_; const absl::optional custom_validator_config_{ absl::nullopt}; @@ -119,6 +120,12 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureFromSsl) { error_details); } +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidCA) { + root_ca_cert_ = "invalid root CA"; + EXPECT_THROW_WITH_REGEX(configCertVerificationDetails(true), EnvoyException, + "Failed to load trusted CA certificates from"); +} + TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidLeafCert) { configCertVerificationDetails(true); const std::string ocsp_response; @@ -192,5 +199,76 @@ VdGXMAjeXhnOnPvmDi5hUz/uvI+Pg6cNmUoCRwSCnK/DazhA EXPECT_EQ("Invalid leaf cert, only P-256 ECDSA certificates are supported", error_details); } +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureNonServerAuthEKU) { + // Override the CA cert with cert copied from test/config/integration/certs/cacert.pem. 
+ root_ca_cert_ = R"(-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIUdCu/mLip3X/We37vh3BA9u/nxakwDQYJKoZIhvcNAQEL +BQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM +DVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n +aW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAwWhcNMjIw +ODA1MTkxNjAwWjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW +MBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ +THlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBALu2Ihi4DmaQG7zySZlWyM9SjxOXCI5840V7Hn0C +XoiI8sQQmKSC2YCzsaphQoJ0lXCi6Y47o5FkooYyLeNDQTGS0nh+IWm5RCyochtO +fnaKPv/hYxhpyFQEwkJkbF1Zt1s6j2rq5MzmbWZx090uXZEE82DNZ9QJaMPu6VWt +iwGoGoS5HF5HNlUVxLNUsklNH0ZfDafR7/LC2ty1vO1c6EJ6yCGiyJZZ7Ilbz27Q +HPAUd8CcDNKCHZDoMWkLSLN3Nj1MvPVZ5HDsHiNHXthP+zV8FQtloAuZ8Srsmlyg +rJREkc7gF3f6HrH5ShNhsRFFc53NUjDbYZuha1u4hiOE8lcCAwEAAaNjMGEwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJZL2ixTtL6V +xpNz4qekny4NchiHMB8GA1UdIwQYMBaAFJZL2ixTtL6VxpNz4qekny4NchiHMA0G +CSqGSIb3DQEBCwUAA4IBAQAcgG+AaCdrUFEVJDn9UsO7zqzQ3c1VOp+WAtAU8OQK +Oc4vJYVVKpDs8OZFxmukCeqm1gz2zDeH7TfgCs5UnLtkplx1YO1bd9qvserJVHiD +LAK+Yl24ZEbrHPaq0zI1RLchqYUOGWmi51pcXi1gsfc8DQ3GqIXoai6kYJeV3jFJ +jxpQSR32nx6oNN/6kVKlgmBjlWrOy7JyDXGim6Z97TzmS6Clctewmw/5gZ9g+M8e +g0ZdFbFkNUjzSNm44hiDX8nR6yJRn+gLaARaJvp1dnT+MlvofZuER17WYKH4OyMs +ie3qKR3an4KC20CtFbpZfv540BVuTTOCtQ5xqZ/LTE78 +-----END CERTIFICATE-----)"; + configCertVerificationDetails(true); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + // This is a cert generated with the test/config/integration/certs/certs.sh. And the config that + // used to generate this cert is same as test/config/integration/certs/servercert.cfg but with + // 'extKeyUsage: clientAuth'. 
+ const std::string certs{R"(-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUWzmfQSTX8xfzUzdByjCjCJN8E/wwDQYJKoZIhvcNAQEL +BQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM +DVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n +aW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjEwOTI5MTY0NTM3WhcNMjMw +OTI5MTY0NTM3WjCBpjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx +FjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM +EEx5ZnQgRW5naW5lZXJpbmcxGjAYBgNVBAMMEVRlc3QgQmFja2VuZCBUZWFtMSQw +IgYJKoZIhvcNAQkBFhViYWNrZW5kLXRlYW1AbHlmdC5jb20wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9JgaI7hxjPM0tsUna/QmivBdKbCrLnLW9Teak +RH/Ebg68ovyvrRIlybDT6XhKi+iVpzVY9kqxhGHgrFDgGLBakVMiYJ5EjIgHfoo4 +UUAHwIYbunJluYCgANzpprBsvTC/yFYDVMqUrjvwHsoYYVm36io994k9+t813b70 +o0l7/PraBsKkz8NcY2V2mrd/yHn/0HAhv3hl6iiJme9yURuDYQrae2ACSrQtsbel +KwdZ/Re71Z1awz0OQmAjMa2HuCop+Q/1QLnqBekT5+DH1qKUzJ3Jkq6NRkERXOpi +87j04rtCBteCogrO67qnuBZ2lH3jYEMb+lQdLkyNMLltBSdLAgMBAAGjgbYwgbMw +DAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwEwYDVR0lBAwwCgYIKwYBBQUHAwIw +QQYDVR0RBDowOIYec3BpZmZlOi8vbHlmdC5jb20vYmFja2VuZC10ZWFtgghseWZ0 +LmNvbYIMd3d3Lmx5ZnQuY29tMB0GA1UdDgQWBBTZdxNltzTEpl+A1UpK8BsxkkIG +hjAfBgNVHSMEGDAWgBSWS9osU7S+lcaTc+KnpJ8uDXIYhzANBgkqhkiG9w0BAQsF +AAOCAQEAhiXkQJZ53L3uoQMX6xNhAFThomirnLm2RT10kPIbr5mmf3wcR8+EKrWX +dWCj56bk1tSDbQZqx33DSGbhvNaydggbo69Pkie5b7J9O7AWzT21NME6Jis9hHED +VUI63L+7SgJ2oZs0o8xccUaLFeknuNdQL4qUEwhMwCC8kYLz+c6g0qwDwZi1MtdL +YR4qm2S6KveVPGzBHpUjfWf/whSCM3JN5Fm8gWfC6d6XEYz6z1dZrj3lpwmhRgF6 +Wb72f68jzCQ3BFqKRFsJI2xz3EP6PoQ+e6EQjMpjQLomxIhIN/aTsgrKwA5wf6vQ +ZCFbredVxDBZuoVsfrKPSQa407Jj1Q== +-----END CERTIFICATE-----)"}; + std::stringstream pem_stream(certs); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(chain[0]); + ASSERT(cert_view); + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain("lyft.com", 54321, chain, ocsp_response, cert_sct, nullptr, + 
&error_details, nullptr, nullptr, nullptr)); + EXPECT_EQ("X509_verify_cert: certificate verification error at depth 0: unsupported certificate " + "purpose", + error_details); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index f44b359cc2b78..a8bcadbaf4b17 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -151,10 +151,8 @@ class EnvoyQuicServerSessionTest : public testing::Test { EnvoyQuicServerSessionTest() : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_({[]() { - SetQuicReloadableFlag(quic_decline_server_push_stream, true); - return quic::CurrentSupportedHttp3Versions()[0]; - }()}), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + quic_version_({[]() { return quic::CurrentSupportedHttp3Versions()[0]; }()}), quic_stat_names_(listener_config_.listenerScope().symbolTable()), quic_connection_(new MockEnvoyQuicServerConnection( connection_helper_, alarm_factory_, writer_, quic_version_, *listener_config_.socket_)), @@ -166,8 +164,7 @@ class EnvoyQuicServerSessionTest : public testing::Test { &compressed_certs_cache_, *dispatcher_, /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5, quic_stat_names_, listener_config_.listenerScope(), - crypto_stream_factory_, - makeOptRefFromPtr(nullptr)), + crypto_stream_factory_), stats_({ALL_HTTP3_CODEC_STATS( POOL_COUNTER_PREFIX(listener_config_.listenerScope(), "http3."), POOL_GAUGE_PREFIX(listener_config_.listenerScope(), "http3."))}) { diff --git a/test/common/quic/envoy_quic_server_stream_test.cc b/test/common/quic/envoy_quic_server_stream_test.cc index 74d5459db9da6..ee01d965e3fd1 100644 --- a/test/common/quic/envoy_quic_server_stream_test.cc +++ 
b/test/common/quic/envoy_quic_server_stream_test.cc @@ -98,7 +98,9 @@ class EnvoyQuicServerStreamTest : public testing::Test { void TearDown() override { if (quic_connection_.connected()) { EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)).Times(testing::AtMost(1u)); - EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, quic::QUIC_STREAM_NO_ERROR)) + EXPECT_CALL(quic_session_, + MaybeSendStopSendingFrame( + _, quic::QuicResetStreamError::FromInternal(quic::QUIC_STREAM_NO_ERROR))) .Times(testing::AtMost(1u)); EXPECT_CALL(quic_connection_, SendConnectionClosePacket(_, quic::NO_IETF_QUIC_ERROR, "Closed by application")); @@ -228,7 +230,7 @@ TEST_F(EnvoyQuicServerStreamTest, ReceiveStopSending) { // Receiving STOP_SENDING alone should trigger upstream reset. EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::RemoteReset, _)); EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)); - quic_stream_->OnStopSending(quic::QUIC_STREAM_NO_ERROR); + quic_stream_->OnStopSending(quic::QuicResetStreamError::FromInternal(quic::QUIC_STREAM_NO_ERROR)); EXPECT_FALSE(quic_stream_->read_side_closed()); // Following FIN should be discarded and the stream should be closed. diff --git a/test/common/quic/envoy_quic_writer_test.cc b/test/common/quic/envoy_quic_writer_test.cc index 3908fb82ba568..f6a21ffce8920 100644 --- a/test/common/quic/envoy_quic_writer_test.cc +++ b/test/common/quic/envoy_quic_writer_test.cc @@ -90,7 +90,7 @@ TEST_F(EnvoyQuicWriterTest, SendBlocked) { quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); EXPECT_EQ(quic::WRITE_STATUS_BLOCKED, result.status); - EXPECT_EQ(static_cast(Api::IoError::IoErrorCode::Again), result.error_code); + EXPECT_EQ(SOCKET_ERROR_AGAIN, result.error_code); EXPECT_TRUE(envoy_quic_writer_.IsWriteBlocked()); // Writing while blocked is not allowed. 
#ifdef NDEBUG @@ -117,7 +117,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailure) { quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); EXPECT_EQ(quic::WRITE_STATUS_ERROR, result.status); - EXPECT_EQ(static_cast(Api::IoError::IoErrorCode::NoSupport), result.error_code); + EXPECT_EQ(SOCKET_ERROR_NOT_SUP, result.error_code); EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked()); } @@ -133,7 +133,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailureMessageTooBig) { // Currently MessageSize should be propagated through error_code. This test // would fail if QUICHE changes to propagate through status in the future. EXPECT_EQ(quic::WRITE_STATUS_ERROR, result.status); - EXPECT_EQ(static_cast(Api::IoError::IoErrorCode::MessageTooBig), result.error_code); + EXPECT_EQ(SOCKET_ERROR_MSG_SIZE, result.error_code); EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked()); } diff --git a/test/common/quic/quic_stat_names_test.cc b/test/common/quic/quic_stat_names_test.cc index ff5225208596e..b8fd31d6440b6 100644 --- a/test/common/quic/quic_stat_names_test.cc +++ b/test/common/quic/quic_stat_names_test.cc @@ -37,8 +37,8 @@ TEST_F(QuicStatNamesTest, OutOfRangeQuicConnectionCloseStats) { } TEST_F(QuicStatNamesTest, ResetStreamErrorStats) { - quic_stat_names_.chargeQuicResetStreamErrorStats(scope_, quic::QUIC_STREAM_CANCELLED, true, - false); + quic_stat_names_.chargeQuicResetStreamErrorStats( + scope_, quic::QuicResetStreamError::FromInternal(quic::QUIC_STREAM_CANCELLED), true, false); EXPECT_EQ(1U, scope_.counter("http3.downstream.tx.quic_reset_stream_error_code_QUIC_STREAM_CANCELLED") .value()); @@ -47,7 +47,10 @@ TEST_F(QuicStatNamesTest, ResetStreamErrorStats) { TEST_F(QuicStatNamesTest, OutOfRangeResetStreamErrorStats) { uint64_t bad_error_code = quic::QUIC_STREAM_LAST_ERROR + 1; quic_stat_names_.chargeQuicResetStreamErrorStats( - scope_, static_cast(bad_error_code), true, false); + scope_, + quic::QuicResetStreamError::FromInternal( + 
static_cast(bad_error_code)), + true, false); EXPECT_EQ( 1U, scope_.counter("http3.downstream.tx.quic_reset_stream_error_code_QUIC_STREAM_LAST_ERROR") .value()); diff --git a/test/common/quic/test_proof_source.h b/test/common/quic/test_proof_source.h index b4a8a8348223b..434c15fecada5 100644 --- a/test/common/quic/test_proof_source.h +++ b/test/common/quic/test_proof_source.h @@ -27,8 +27,9 @@ class TestProofSource : public EnvoyQuicProofSourceBase { public: quic::QuicReferenceCountedPointer GetCertChain(const quic::QuicSocketAddress& /*server_address*/, - const quic::QuicSocketAddress& /*client_address*/, - const std::string& /*hostname*/) override { + const quic::QuicSocketAddress& /*client_address*/, const std::string& /*hostname*/, + bool* cert_matched_sni) override { + *cert_matched_sni = true; return cert_chain_; } diff --git a/test/common/quic/test_utils.h b/test/common/quic/test_utils.h index 3999d237c1ff6..68ea85e782344 100644 --- a/test/common/quic/test_utils.h +++ b/test/common/quic/test_utils.h @@ -105,10 +105,10 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana quic::EncryptionLevel level)); MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); MOCK_METHOD(void, MaybeSendRstStreamFrame, - (quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, + (quic::QuicStreamId id, quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written)); MOCK_METHOD(void, MaybeSendStopSendingFrame, - (quic::QuicStreamId id, quic::QuicRstStreamErrorCode error)); + (quic::QuicStreamId id, quic::QuicResetStreamError error)); MOCK_METHOD(void, dumpState, (std::ostream&, int), (const)); absl::string_view requestedServerName() const override { diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 71bcbb2e18633..c540db9e123f2 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2891,6 +2891,39 @@ TEST_F(RouteMatcherTest, ClusterHeader) { } 
} +TEST_F(RouteMatcherTest, WeightedClusterHeader) { + const std::string yaml = R"EOF( + virtual_hosts: + - name: www1 + domains: ["www1.lyft.com"] + routes: + - match: { prefix: "/" } + route: + weighted_clusters: + total_weight: 100 + clusters: + - cluster_header: some_header + weight: 30 + - name: cluster1 + weight: 30 + - name: cluster2 + weight: 40 + )EOF"; + + factory_context_.cluster_manager_.initializeClusters({"some_header", "cluster1", "cluster2"}, {}); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + + Http::TestRequestHeaderMapImpl headers = genHeaders("www1.lyft.com", "/foo", "GET"); + // The configured cluster header isn't present in the request headers, therefore cluster selection + // fails and we get the empty string + EXPECT_EQ("", config.route(headers, 115)->routeEntry()->clusterName()); + // Modify the header mapping. + headers.addCopy("some_header", "some_cluster"); + EXPECT_EQ("some_cluster", config.route(headers, 115)->routeEntry()->clusterName()); + EXPECT_EQ("cluster1", config.route(headers, 445)->routeEntry()->clusterName()); + EXPECT_EQ("cluster2", config.route(headers, 560)->routeEntry()->clusterName()); +} + TEST_F(RouteMatcherTest, ContentType) { const std::string yaml = R"EOF( virtual_hosts: @@ -3337,6 +3370,7 @@ TEST_F(RouteMatcherTest, Retry) { cluster: www2 retry_policy: per_try_timeout: 1s + per_try_idle_timeout: 5s num_retries: 3 retry_on: 5xx,gateway-error,connect-failure,reset )EOF"; @@ -3349,6 +3383,11 @@ TEST_F(RouteMatcherTest, Retry) { ->routeEntry() ->retryPolicy() .perTryTimeout()); + EXPECT_EQ(std::chrono::milliseconds(0), + config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .perTryIdleTimeout()); EXPECT_EQ(1U, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() ->retryPolicy() @@ -3378,6 +3417,11 @@ TEST_F(RouteMatcherTest, Retry) { ->routeEntry() ->retryPolicy() .perTryTimeout()); + 
EXPECT_EQ(std::chrono::milliseconds(5000), + config.route(genHeaders("www.lyft.com", "/", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .perTryIdleTimeout()); EXPECT_EQ(3U, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) ->routeEntry() ->retryPolicy() @@ -3390,25 +3434,56 @@ TEST_F(RouteMatcherTest, Retry) { .retryOn()); } +class TestRetryOptionsPredicateFactory : public Upstream::RetryOptionsPredicateFactory { +public: + Upstream::RetryOptionsPredicateConstSharedPtr + createOptionsPredicate(const Protobuf::Message&, + Upstream::RetryExtensionFactoryContext&) override { + return nullptr; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom empty config proto. This is only allowed in tests. + return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + + std::string name() const override { return "test_retry_options_predicate_factory"; } +}; + TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { const std::string yaml = R"EOF( virtual_hosts: - domains: [www.lyft.com] per_request_buffer_limit_bytes: 8 name: www - retry_policy: {num_retries: 3, per_try_timeout: 1s, retry_on: '5xx,gateway-error,connect-failure,reset'} + retry_policy: + num_retries: 3 + per_try_timeout: 1s + retry_on: '5xx,gateway-error,connect-failure,reset' + retry_options_predicates: + - name: test_retry_options_predicate_factory + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct routes: - match: {prefix: /foo} per_request_buffer_limit_bytes: 7 route: cluster: www - retry_policy: {retry_on: connect-failure} + retry_policy: + retry_on: connect-failure + retry_options_predicates: + - name: test_retry_options_predicate_factory + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct - match: {prefix: /bar} route: {cluster: www} - match: {prefix: /} route: {cluster: www} )EOF"; + TestRetryOptionsPredicateFactory factory; + Registry::InjectFactory registered(factory); + 
factory_context_.cluster_manager_.initializeClusters({"www"}, {}); TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); @@ -3430,6 +3505,11 @@ TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { EXPECT_EQ(7U, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() ->retryShadowBufferLimit()); + EXPECT_EQ(1U, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .retryOptionsPredicates() + .size()); // Virtual Host level retry policy kicks in. EXPECT_EQ(std::chrono::milliseconds(1000), @@ -3465,6 +3545,11 @@ TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { EXPECT_EQ(8U, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) ->routeEntry() ->retryShadowBufferLimit()); + EXPECT_EQ(1U, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .retryOptionsPredicates() + .size()); } TEST_F(RouteMatcherTest, GrpcRetry) { @@ -5591,7 +5676,8 @@ TEST_F(RouteMatcherTest, TestOpaqueConfig) { EXPECT_EQ(opaque_config.find("name2")->second, "value2"); } -// Test that the deprecated name works for opaque configs. +// Test that the deprecated name no longer works by default for opaque configs. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestOpaqueConfigUsingDeprecatedName)) { const std::string yaml = R"EOF( virtual_hosts: @@ -5611,13 +5697,8 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestOpaqueConfigUsingDeprecated )EOF"; factory_context_.cluster_manager_.initializeClusters({"ats"}, {}); - TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); - - const std::multimap& opaque_config = - config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->opaqueConfig(); - - EXPECT_EQ(opaque_config.find("name1")->second, "value1"); - EXPECT_EQ(opaque_config.find("name2")->second, "value2"); + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException); } class RoutePropertyTest : public testing::Test, public ConfigImplTestBase {}; diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 5963d3063a277..ef956636f488f 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -64,6 +64,20 @@ using testing::ReturnRef; namespace Envoy { namespace Router { +// Allows verifying the state of the upstream StreamInfo +class TestAccessLog : public AccessLog::Instance { +public: + explicit TestAccessLog(std::function func) : func_(func) {} + + void log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*, + const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& info) override { + func_(info); + } + +private: + std::function func_; +}; + class RouterTest : public RouterTestBase { public: RouterTest() : RouterTestBase(false, false, false, Protobuf::RepeatedPtrField{}) { @@ -141,65 +155,130 @@ class RouterTest : public RouterTestBase { router_.onDestroy(); } + + void testAutoSniOptions( + absl::optional dummy_option, + Envoy::Http::TestRequestHeaderMapImpl headers, std::string server_name = 
"host", + bool should_validate_san = false, std::string alt_server_name = "host") { + NiceMock stream_info; + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions()) + .WillByDefault(ReturnRef(dummy_option)); + ON_CALL(callbacks_.stream_info_, filterState()) + .WillByDefault(ReturnRef(stream_info.filterState())); + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Return(&cancellable_)); + stream_info.filterState()->setData(Network::UpstreamServerName::key(), + std::make_unique("dummy"), + StreamInfo::FilterState::StateType::Mutable); + expectResponseTimerCreate(); + + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + EXPECT_EQ(server_name, + stream_info.filterState() + ->getDataReadOnly(Network::UpstreamServerName::key()) + .value()); + if (should_validate_san) { + EXPECT_EQ(alt_server_name, stream_info.filterState() + ->getDataReadOnly( + Network::UpstreamSubjectAltNames::key()) + .value()[0]); + } + EXPECT_CALL(cancellable_, cancel(_)); + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + } }; -TEST_F(RouterTest, UpdateServerNameFilterState) { - NiceMock stream_info; +TEST_F(RouterTest, UpdateServerNameFilterStateWithoutHeaderOverride) { auto dummy_option = absl::make_optional(); dummy_option.value().set_auto_sni(true); - ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions()) - .WillByDefault(ReturnRef(dummy_option)); - ON_CALL(callbacks_.stream_info_, filterState()) - .WillByDefault(ReturnRef(stream_info.filterState())); - EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) - .WillOnce(Return(&cancellable_)); - stream_info.filterState()->setData(Network::UpstreamServerName::key(), - 
std::make_unique("dummy"), - StreamInfo::FilterState::StateType::Mutable); - expectResponseTimerCreate(); - Http::TestRequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers); +} - HttpTestUtility::addDefaultHeaders(headers); - router_.decodeHeaders(headers, true); - EXPECT_EQ("host", - stream_info.filterState() - ->getDataReadOnly(Network::UpstreamServerName::key()) - .value()); - EXPECT_CALL(cancellable_, cancel(_)); - router_.onDestroy(); - EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); +TEST_F(RouterTest, UpdateServerNameFilterStateWithHostHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_override_auto_sni_header(":authority"); + + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers); +} + +TEST_F(RouterTest, UpdateServerNameFilterStateWithHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + const auto server_name = "foo.bar"; + Http::TestRequestHeaderMapImpl headers{{"x-host", server_name}}; + testAutoSniOptions(dummy_option, headers, server_name); } -TEST_F(RouterTest, UpdateSubjectAltNamesFilterState) { - NiceMock stream_info; +TEST_F(RouterTest, UpdateServerNameFilterStateWithEmptyValueHeaderOverride) { auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + Http::TestRequestHeaderMapImpl headers{{"x-host", ""}}; + testAutoSniOptions(dummy_option, headers); +} + +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithoutHeaderOverride) { + auto dummy_option = 
absl::make_optional(); + dummy_option.value().set_auto_sni(true); dummy_option.value().set_auto_san_validation(true); - ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions()) - .WillByDefault(ReturnRef(dummy_option)); - ON_CALL(callbacks_.stream_info_, filterState()) - .WillByDefault(ReturnRef(stream_info.filterState())); - EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) - .WillOnce(Return(&cancellable_)); - expectResponseTimerCreate(); - Http::TestRequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers, "host", true); +} - HttpTestUtility::addDefaultHeaders(headers); - router_.decodeHeaders(headers, true); - EXPECT_EQ("host", stream_info.filterState() - ->getDataReadOnly( - Network::UpstreamSubjectAltNames::key()) - .value()[0]); - EXPECT_CALL(cancellable_, cancel(_)); - router_.onDestroy(); - EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithHostHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header(":authority"); + + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers, "host", true); +} + +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + const auto server_name = "foo.bar"; + Http::TestRequestHeaderMapImpl headers{{"x-host", server_name}}; + testAutoSniOptions(dummy_option, headers, server_name, true, server_name); +} + +TEST_F(RouterTest, 
UpdateSubjectAltNamesFilterStateWithEmptyValueHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + Http::TestRequestHeaderMapImpl headers{{"x-host", ""}}; + testAutoSniOptions(dummy_option, headers, "host", true); +} + +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithIpHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + const auto server_name = "127.0.0.1"; + Http::TestRequestHeaderMapImpl headers{{"x-host", server_name}}; + testAutoSniOptions(dummy_option, headers, "dummy", true, server_name); } TEST_F(RouterTest, RouteNotFound) { @@ -333,43 +412,6 @@ TEST_F(RouterTest, PoolFailureDueToConnectTimeout) { "upstream_reset_before_response_started{connection failure,connect_timeout}"); } -TEST_F(RouterTest, PoolFailureDueToConnectTimeoutLegacy) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure", "false"}}); - ON_CALL(callbacks_.route_->route_entry_, priority()) - .WillByDefault(Return(Upstream::ResourcePriority::High)); - EXPECT_CALL(cm_.thread_local_cluster_, - httpConnPool(Upstream::ResourcePriority::High, _, &router_)); - EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) - .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks) - -> Http::ConnectionPool::Cancellable* { - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Timeout, "connect_timeout", - cm_.thread_local_cluster_.conn_pool_.host_); - return nullptr; - })); - - Http::TestResponseHeaderMapImpl response_headers{ - {":status", "503"}, {"content-length", "127"}, {"content-type", 
"text/plain"}}; - EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); - EXPECT_CALL(callbacks_, encodeData(_, true)); - EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::LocalReset)); - EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) - .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { - EXPECT_EQ(host_address_, host->address()); - })); - - Http::TestRequestHeaderMapImpl headers; - HttpTestUtility::addDefaultHeaders(headers); - router_.decodeHeaders(headers, true); - EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - // Pool failure, so upstream request was not initiated. - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details(), - "upstream_reset_before_response_started{local reset,connect_timeout}"); -} - TEST_F(RouterTest, Http1Upstream) { EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, absl::optional(), _)); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) @@ -832,7 +874,18 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestNotOverwritten) { /* expected_count */ 123); } +class MockRetryOptionsPredicate : public Upstream::RetryOptionsPredicate { +public: + MOCK_METHOD(UpdateOptionsReturn, updateOptions, (const UpdateOptionsParameters& parameters), + (const)); +}; + +// Also verify retry options predicates work. TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { + auto retry_options_predicate = std::make_shared(); + callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back( + retry_options_predicate); + setIncludeAttemptCountInRequest(true); NiceMock encoder1; @@ -859,13 +912,21 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { // 5xx response. 
router_.retry_state_->expectHeadersRetry(); + Upstream::RetryOptionsPredicate::UpdateOptionsReturn update_options_return{ + std::make_shared()}; + EXPECT_CALL(*retry_options_predicate, updateOptions(_)).WillOnce(Return(update_options_return)); Http::ResponseHeaderMapPtr response_headers1( new Http::TestResponseHeaderMapImpl{{":status", "503"}}); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503)); + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) response_decoder->decodeHeaders(std::move(response_headers1), true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + // Verify retry options predicate return values have been updated. + EXPECT_EQ(update_options_return.new_upstream_socket_options_.value(), + router_.upstreamSocketOptions()); + // We expect the 5xx response to kick off a new request. EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); NiceMock encoder2; @@ -1875,6 +1936,140 @@ TEST_F(RouterTest, UpstreamTimeoutWithAltResponse) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +// Verify the upstream per try idle timeout. +TEST_F(RouterTest, UpstreamPerTryIdleTimeout) { + InSequence s; + + callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ = + std::chrono::milliseconds(3000); + + // This pattern helps ensure that we're actually invoking the callback. 
+ bool filter_state_verified = false; + router_.config().upstream_logs_.push_back( + std::make_shared([&](const auto& stream_info) { + filter_state_verified = + stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + })); + + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + Http::ConnectionPool::Callbacks* pool_callbacks; + + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + pool_callbacks = &callbacks; + return nullptr; + })); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*response_timeout_, enableTimer(_, _)); + + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) + .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { + EXPECT_EQ(host_address_, host->address()); + })); + + per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + // The per try timeout timer should not be started yet. 
+ pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_, + upstream_stream_info_, Http::Protocol::Http10); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + + EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset)); + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); + EXPECT_CALL(*per_try_idle_timeout_, disableTimer()); + EXPECT_CALL(callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); + EXPECT_CALL(*response_timeout_, disableTimer()); + EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails("upstream_per_try_idle_timeout")); + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; + EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + EXPECT_CALL(callbacks_, encodeData(_, true)); + per_try_idle_timeout_->invokeCallback(); + + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_per_try_idle_timeout") + .value()); + EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value()); + EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + EXPECT_TRUE(filter_state_verified); +} + +// Verify the upstream per try idle timeout gets reset in the success case. 
+TEST_F(RouterTest, UpstreamPerTryIdleTimeoutSuccess) { + InSequence s; + + callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ = + std::chrono::milliseconds(3000); + + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + Http::ConnectionPool::Callbacks* pool_callbacks; + + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + pool_callbacks = &callbacks; + return nullptr; + })); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*response_timeout_, enableTimer(_, _)); + + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) + .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { + EXPECT_EQ(host_address_, host->address()); + })); + + per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + // The per try timeout timer should not be started yet. 
+ pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_, + upstream_stream_info_, Http::Protocol::Http10); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), false); + + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + response_decoder->decodeData(data, false); + + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + EXPECT_CALL(*per_try_idle_timeout_, disableTimer()); + EXPECT_CALL(*response_timeout_, disableTimer()); + response_decoder->decodeData(data, true); +} + // Verifies that the per try timeout is initialized once the downstream request has been read. TEST_F(RouterTest, UpstreamPerTryTimeout) { NiceMock encoder; @@ -1899,7 +2094,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeout) { router_.decodeHeaders(headers, false); // We verify that both timeouts are started after decodeData(_, true) is called. This - // verifies that we are not starting the initial per try timeout on the first onPoolReady.FOO + // verifies that we are not starting the initial per try timeout on the first onPoolReady. expectPerTryTimerCreate(); expectResponseTimerCreate(); @@ -1927,7 +2122,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeout) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } -// Verifies that the per try timeout starts when onPoolReady is called when it occursFOO +// Verifies that the per try timeout starts when onPoolReady is called when it occurs // after the downstream request has been read. 
TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) { NiceMock encoder; @@ -1952,7 +2147,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) { Buffer::OwnedImpl data; router_.decodeData(data, true); - // Per try timeout starts when onPoolReady is called.FOO + // Per try timeout starts when onPoolReady is called. expectPerTryTimerCreate(); EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { @@ -2047,8 +2242,12 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) { // Tests that a retry is sent after the first request hits the per try timeout, but then // headers received in response to the first request are still used (and the 2nd request -// canceled). +// canceled). Also verify retry options predicates work. TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { + auto retry_options_predicate = std::make_shared(); + callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back( + retry_options_predicate); + enableHedgeOnPerTryTimeout(); NiceMock encoder1; @@ -2083,6 +2282,7 @@ TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { NiceMock encoder2; Http::ResponseDecoder* response_decoder2 = nullptr; router_.retry_state_->expectHedgedPerTryTimeoutRetry(); + EXPECT_CALL(*retry_options_predicate, updateOptions(_)); per_try_timeout_->invokeCallback(); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) @@ -2501,8 +2701,12 @@ TEST_F(RouterTest, BadHeadersDroppedIfPreviousRetryScheduled) { } // Test retrying a request, when the first attempt fails before the client -// has sent any of the body. +// has sent any of the body. Also verify retry options predicates work. 
TEST_F(RouterTest, RetryRequestBeforeBody) { + auto retry_options_predicate = std::make_shared(); + callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back( + retry_options_predicate); + NiceMock encoder1; Http::ResponseDecoder* response_decoder = nullptr; EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) @@ -2522,6 +2726,7 @@ TEST_F(RouterTest, RetryRequestBeforeBody) { router_.decodeHeaders(headers, false); router_.retry_state_->expectResetRetry(); + EXPECT_CALL(*retry_options_predicate, updateOptions(_)); encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); NiceMock encoder2; @@ -2553,6 +2758,7 @@ TEST_F(RouterTest, RetryRequestBeforeBody) { .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(headers.Status()->value(), "200"); })); + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); } @@ -4838,20 +5044,6 @@ TEST_F(RouterTest, DirectResponseWithoutLocation) { EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } -// Allows verifying the state of the upstream StreamInfo -class TestAccessLog : public AccessLog::Instance { -public: - explicit TestAccessLog(std::function func) : func_(func) {} - - void log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*, - const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& info) override { - func_(info); - } - -private: - std::function func_; -}; - // Verifies that we propagate the upstream connection filter state to the upstream and downstream // request filter state. 
TEST_F(RouterTest, PropagatesUpstreamFilterState) { @@ -5968,6 +6160,7 @@ TEST_F(RouterTest, ConnectPauseAndResume) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); + headers.removePath(); router_.decodeHeaders(headers, false); // Make sure any early data does not go upstream. @@ -6040,6 +6233,7 @@ TEST_F(RouterTest, ConnectPauseNoResume) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); + headers.removePath(); router_.decodeHeaders(headers, false); // Make sure any early data does not go upstream. @@ -6070,6 +6264,7 @@ TEST_F(RouterTest, ConnectExplicitTcpUpstream) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); + headers.removePath(); router_.decodeHeaders(headers, false); router_.onDestroy(); diff --git a/test/common/router/router_test_base.cc b/test/common/router/router_test_base.cc index 671bbee7b588e..dc598c6e4f010 100644 --- a/test/common/router/router_test_base.cc +++ b/test/common/router/router_test_base.cc @@ -51,6 +51,11 @@ void RouterTestBase::expectPerTryTimerCreate() { EXPECT_CALL(*per_try_timeout_, disableTimer()); } +void RouterTestBase::expectPerTryIdleTimerCreate(std::chrono::milliseconds timeout) { + per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(timeout, _)); +} + void RouterTestBase::expectMaxStreamDurationTimerCreate(std::chrono::milliseconds duration_msec) { max_stream_duration_timer_ = new Event::MockTimer(&callbacks_.dispatcher_); EXPECT_CALL(*max_stream_duration_timer_, enableTimer(Eq(duration_msec), _)); diff --git a/test/common/router/router_test_base.h b/test/common/router/router_test_base.h index 9ac4a94bfc92f..0dff3f0ade1e7 100644 --- a/test/common/router/router_test_base.h +++ b/test/common/router/router_test_base.h @@ -58,6 +58,7 @@ class RouterTestBase : 
public testing::Test { void expectResponseTimerCreate(); void expectPerTryTimerCreate(); + void expectPerTryIdleTimerCreate(std::chrono::milliseconds timeout); void expectMaxStreamDurationTimerCreate(std::chrono::milliseconds duration_msec); AssertionResult verifyHostUpstreamStats(uint64_t success, uint64_t error); void verifyMetadataMatchCriteriaFromRequest(bool route_entry_has_match); @@ -97,6 +98,7 @@ class RouterTestBase : public testing::Test { RouterTestFilter router_; Event::MockTimer* response_timeout_{}; Event::MockTimer* per_try_timeout_{}; + Event::MockTimer* per_try_idle_timeout_{}; Event::MockTimer* max_stream_duration_timer_{}; Network::Address::InstanceConstSharedPtr host_address_{ Network::Utility::resolveUrl("tcp://10.0.0.5:9211")}; diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 15fc8fa59421b..a73f143ba8613 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -279,7 +279,7 @@ TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); envoy::config::core::v3::ConfigSource config_source; @@ -362,7 +362,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { })); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, 
mainThreadDispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); @@ -408,7 +408,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { Init::TargetHandlePtr init_target_handle; NiceMock init_watcher; - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, messageValidationVisitor()).WillOnce(ReturnRef(validation_visitor)); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); @@ -461,7 +461,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandler) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); auto secret_provider = @@ -731,7 +731,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); auto secret_provider = @@ -879,7 +879,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, 
initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); const std::string tls_certificate = @@ -956,7 +956,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); const std::string validation_context = @@ -1004,7 +1004,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); const std::string stek_context = @@ -1085,7 +1085,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretPrivateKeyProviderUpdateSuccess) { })); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, 
api()).WillRepeatedly(ReturnRef(*api_)); diff --git a/test/common/singleton/manager_impl_test.cc b/test/common/singleton/manager_impl_test.cc index d3184bae8f580..64e08cdb93848 100644 --- a/test/common/singleton/manager_impl_test.cc +++ b/test/common/singleton/manager_impl_test.cc @@ -42,6 +42,23 @@ TEST(SingletonManagerImplTest, Basic) { singleton.reset(); } +TEST(SingletonManagerImplTest, NonConstructingGetTyped) { + ManagerImpl manager(Thread::threadFactoryForTest()); + + // Access without first constructing should be null. + EXPECT_EQ(nullptr, manager.getTyped("test_singleton")); + + std::shared_ptr singleton = std::make_shared(); + // Use a construct on first use getter. + EXPECT_EQ(singleton, manager.get("test_singleton", [singleton] { return singleton; })); + // Now access should return the constructed singleton. + EXPECT_EQ(singleton, manager.getTyped("test_singleton")); + EXPECT_EQ(1UL, singleton.use_count()); + + EXPECT_CALL(*singleton, onDestroy()); + singleton.reset(); +} + } // namespace } // namespace Singleton } // namespace Envoy diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index cc3e4b21f360d..41f27fac08eac 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -22,6 +22,14 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "custom_stat_namespaces_impl_test", + srcs = ["custom_stat_namespaces_impl_test.cc"], + deps = [ + "//source/common/stats:custom_stat_namespaces_lib", + ], +) + envoy_cc_test( name = "isolated_store_impl_test", srcs = ["isolated_store_impl_test.cc"], @@ -265,6 +273,7 @@ envoy_cc_test( "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:logging_lib", + "//test/test_common:real_threads_test_helper_lib", "//test/test_common:test_time_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", diff --git a/test/common/stats/custom_stat_namespaces_impl_test.cc b/test/common/stats/custom_stat_namespaces_impl_test.cc new file 
mode 100644 index 0000000000000..e0500fe5edcf7 --- /dev/null +++ b/test/common/stats/custom_stat_namespaces_impl_test.cc @@ -0,0 +1,36 @@ +#include "source/common/stats/custom_stat_namespaces_impl.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Stats { + +TEST(CustomStatNamespacesImpl, Registration) { + CustomStatNamespacesImpl namespaces; + const std::string name = "foo"; + EXPECT_FALSE(namespaces.registered(name)); + namespaces.registerStatNamespace(name); + EXPECT_TRUE(namespaces.registered(name)); + EXPECT_FALSE(namespaces.registered("bar")); +} + +TEST(CustomStatNamespacesImpl, StripRegisteredPrefix) { + CustomStatNamespacesImpl namespaces; + // no namespace is registered. + EXPECT_FALSE(namespaces.stripRegisteredPrefix("foo.bar").has_value()); + namespaces.registerStatNamespace("foo"); + // namespace is not registered. + EXPECT_FALSE(namespaces.stripRegisteredPrefix("bar.my.value").has_value()); + EXPECT_FALSE(namespaces.stripRegisteredPrefix("foobar.my.value").has_value()); + // "." is not present in the stat name - we skip these cases. + EXPECT_FALSE(namespaces.stripRegisteredPrefix("foo").has_value()); + EXPECT_FALSE(namespaces.stripRegisteredPrefix("bar").has_value()); + // Should be stripped. 
+ const absl::optional actual = + namespaces.stripRegisteredPrefix("foo.my.extension.metric"); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(actual.value(), "my.extension.metric"); +} + +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 5bf5e67395437..0847d9605df29 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -20,11 +20,10 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/logging.h" +#include "test/test_common/real_threads_test_helper.h" #include "test/test_common/utility.h" #include "absl/strings/str_split.h" -#include "absl/synchronization/blocking_counter.h" -#include "absl/synchronization/notification.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -1565,75 +1564,36 @@ TEST_F(HistogramTest, ParentHistogramBucketSummary) { "B3.6e+06(1,1)", parent_histogram->bucketSummary()); } - -class ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase { +class ThreadLocalRealThreadsTestBase : public Thread::RealThreadsTestHelper, + public ThreadLocalStoreNoMocksTestBase { protected: static constexpr uint32_t NumScopes = 1000; static constexpr uint32_t NumIters = 35; - // Helper class to block on a number of multi-threaded operations occurring. - class BlockingBarrier { - public: - explicit BlockingBarrier(uint32_t count) : blocking_counter_(count) {} - ~BlockingBarrier() { blocking_counter_.Wait(); } - - /** - * Returns a function that first executes 'f', and then decrements the count - * toward unblocking the scope. This is intended to be used as a post() callback. - * - * @param f the function to run prior to decrementing the count. 
- */ - std::function run(std::function f) { - return [this, f]() { - f(); - decrementCount(); - }; - } - - /** - * @return a function that, when run, decrements the count, intended for passing to post(). - */ - std::function decrementCountFn() { - return [this] { decrementCount(); }; - } - - void decrementCount() { blocking_counter_.DecrementCount(); } - - private: - absl::BlockingCounter blocking_counter_; - }; - +public: ThreadLocalRealThreadsTestBase(uint32_t num_threads) - : num_threads_(num_threads), api_(Api::createApiForTest()), - thread_factory_(api_->threadFactory()), pool_(store_->symbolTable()) { - // This is the same order as InstanceImpl::initialize in source/server/server.cc. - thread_dispatchers_.resize(num_threads_); - { - BlockingBarrier blocking_barrier(num_threads_ + 1); - main_thread_ = thread_factory_.createThread( - [this, &blocking_barrier]() { mainThreadFn(blocking_barrier); }); - for (uint32_t i = 0; i < num_threads_; ++i) { - threads_.emplace_back(thread_factory_.createThread( - [this, i, &blocking_barrier]() { workerThreadFn(i, blocking_barrier); })); - } - } - - { - BlockingBarrier blocking_barrier(1); - main_dispatcher_->post(blocking_barrier.run([this]() { - tls_ = std::make_unique(); - tls_->registerThread(*main_dispatcher_, true); - for (Event::DispatcherPtr& dispatcher : thread_dispatchers_) { - // Worker threads must be registered from the main thread, per assert in registerThread(). - tls_->registerThread(*dispatcher, false); - } - store_->initializeThreading(*main_dispatcher_, *tls_); - })); - } + : RealThreadsTestHelper(num_threads), pool_(store_->symbolTable()) { + runOnMainBlocking([this]() { store_->initializeThreading(*main_dispatcher_, *tls_); }); } ~ThreadLocalRealThreadsTestBase() override { + // TODO(chaoqin-li1123): clean this up when we figure out how to free the threading resources in + // RealThreadsTestHelper. 
shutdownThreading(); + exitThreads(); + } + + void shutdownThreading() { + runOnMainBlocking([this]() { + if (!tls_->isShutdown()) { + tls_->shutdownGlobalThreading(); + } + store_->shutdownThreading(); + tls_->shutdownThread(); + }); + } + + void exitThreads() { for (Event::DispatcherPtr& dispatcher : thread_dispatchers_) { dispatcher->post([&dispatcher]() { dispatcher->exit(); }); } @@ -1650,52 +1610,6 @@ class ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase { main_thread_->join(); } - void shutdownThreading() { - BlockingBarrier blocking_barrier(1); - main_dispatcher_->post(blocking_barrier.run([this]() { - if (!tls_->isShutdown()) { - tls_->shutdownGlobalThreading(); - } - store_->shutdownThreading(); - tls_->shutdownThread(); - })); - } - - void workerThreadFn(uint32_t thread_index, BlockingBarrier& blocking_barrier) { - thread_dispatchers_[thread_index] = - api_->allocateDispatcher(absl::StrCat("test_worker_", thread_index)); - blocking_barrier.decrementCount(); - thread_dispatchers_[thread_index]->run(Event::Dispatcher::RunType::RunUntilExit); - } - - void mainThreadFn(BlockingBarrier& blocking_barrier) { - main_dispatcher_ = api_->allocateDispatcher("test_main_thread"); - blocking_barrier.decrementCount(); - main_dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); - } - - void mainDispatchBlock() { - // To ensure all stats are freed we have to wait for a few posts() to clear. - // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup. 
- BlockingBarrier blocking_barrier(1); - main_dispatcher_->post(blocking_barrier.run([]() {})); - } - - void tlsBlock() { - BlockingBarrier blocking_barrier(num_threads_); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post(blocking_barrier.run([]() {})); - } - } - - const uint32_t num_threads_; - Api::ApiPtr api_; - Event::DispatcherPtr main_dispatcher_; - std::vector thread_dispatchers_; - Thread::ThreadFactory& thread_factory_; - ThreadLocal::InstanceImplPtr tls_; - Thread::ThreadPtr main_thread_; - std::vector threads_; StatNamePool pool_; }; @@ -1717,11 +1631,8 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalRealThreadsTestBa } void createScopesIncCountersAndCleanupAllThreads() { - BlockingBarrier blocking_barrier(NumThreads); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post( - blocking_barrier.run([this]() { createScopesIncCountersAndCleanup(); })); - } + + runOnAllWorkersBlocking([this]() { createScopesIncCountersAndCleanup(); }); } std::chrono::seconds elapsedTime() { @@ -1795,7 +1706,7 @@ class HistogramThreadTest : public ThreadLocalRealThreadsTestBase { void mergeHistograms() { BlockingBarrier blocking_barrier(1); - main_dispatcher_->post([this, &blocking_barrier]() { + runOnMainBlocking([this, &blocking_barrier]() { store_->mergeHistograms(blocking_barrier.decrementCountFn()); }); } @@ -1804,7 +1715,7 @@ class HistogramThreadTest : public ThreadLocalRealThreadsTestBase { uint32_t num; { BlockingBarrier blocking_barrier(1); - main_dispatcher_->post([this, &num, &blocking_barrier]() { + runOnMainBlocking([this, &num, &blocking_barrier]() { ThreadLocalStoreTestingPeer::numTlsHistograms(*store_, [&num, &blocking_barrier](uint32_t num_hist) { num = num_hist; @@ -1817,10 +1728,7 @@ class HistogramThreadTest : public ThreadLocalRealThreadsTestBase { // Executes a function on every worker thread dispatcher. 
void foreachThread(const std::function& fn) { - BlockingBarrier blocking_barrier(NumThreads); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post(blocking_barrier.run(fn)); - } + runOnAllWorkersBlocking([&fn]() { fn(); }); } }; diff --git a/test/common/thread_local/thread_local_impl_test.cc b/test/common/thread_local/thread_local_impl_test.cc index 6e2586db580ac..d9a774e8bf9ad 100644 --- a/test/common/thread_local/thread_local_impl_test.cc +++ b/test/common/thread_local/thread_local_impl_test.cc @@ -17,19 +17,17 @@ namespace ThreadLocal { TEST(MainThreadVerificationTest, All) { // Before threading is on, assertion on main thread should be true. - EXPECT_TRUE(Thread::MainThread::isMainThread()); - EXPECT_TRUE(Thread::MainThread::isWorkerThread()); + EXPECT_TRUE(Thread::MainThread::isMainOrTestThread()); { InstanceImpl tls; // Tls instance has been initialized. // Call to main thread verification should succeed in main thread. - EXPECT_TRUE(Thread::MainThread::isMainThread()); - EXPECT_FALSE(Thread::MainThread::isWorkerThread()); + EXPECT_TRUE(Thread::MainThread::isMainOrTestThread()); tls.shutdownGlobalThreading(); tls.shutdownThread(); } // After threading is off, assertion on main thread should be true. - EXPECT_TRUE(Thread::MainThread::isMainThread()); + EXPECT_TRUE(Thread::MainThread::isMainOrTestThread()); } class TestThreadLocalObject : public ThreadLocalObject { @@ -303,7 +301,7 @@ TEST(ThreadLocalInstanceImplDispatcherTest, Dispatcher) { // Verify we have the expected dispatcher for the new thread thread. EXPECT_EQ(thread_dispatcher.get(), &tls.dispatcher()); // Verify that it is inside the worker thread. 
- EXPECT_FALSE(Thread::MainThread::isMainThread()); + EXPECT_FALSE(Thread::MainThread::isMainOrTestThread()); }); thread->join(); diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 684e16be97e75..8cef078804972 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -827,6 +827,7 @@ envoy_proto_library( srcs = ["round_robin_load_balancer_fuzz.proto"], deps = [ "//test/common/upstream:zone_aware_load_balancer_fuzz_proto", + "@envoy_api//envoy/config/cluster/v3:pkg", ], ) diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 86f133ecef107..bfadeb85b292b 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -524,10 +524,7 @@ class ClusterManagerSubsetInitializationTest for (int i = first; i <= last; i++) { if (envoy::config::cluster::v3::Cluster::LbPolicy_IsValid(i)) { auto policy = static_cast(i); - if (policy != - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB) { - policies.push_back(policy); - } + policies.push_back(policy); } } return policies; @@ -4960,6 +4957,51 @@ TEST_F(ClusterManagerImplTest, ConnectionPoolPerDownstreamConnection) { Http::Protocol::Http11, &lb_context))); } +TEST_F(ClusterManagerImplTest, CheckActiveStaticCluster) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: good + connect_timeout: 0.250s + lb_policy: ROUND_ROBIN + type: STATIC + load_assignment: + cluster_name: good + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 + )EOF"; + create(parseBootstrapFromV3Yaml(yaml)); + const std::string added_via_api_yaml = R"EOF( + name: added_via_api + connect_timeout: 0.250s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: added_via_api + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + 
port_value: 11001 + )EOF"; + EXPECT_TRUE( + cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(added_via_api_yaml), "v1")); + + EXPECT_EQ(2, cluster_manager_->clusters().active_clusters_.size()); + EXPECT_NO_THROW(cluster_manager_->checkActiveStaticCluster("good")); + EXPECT_THROW_WITH_MESSAGE(cluster_manager_->checkActiveStaticCluster("nonexist"), EnvoyException, + "Unknown gRPC client cluster 'nonexist'"); + EXPECT_THROW_WITH_MESSAGE(cluster_manager_->checkActiveStaticCluster("added_via_api"), + EnvoyException, "gRPC client cluster 'added_via_api' is not static"); +} + class PreconnectTest : public ClusterManagerImplTest { public: void initialize(float ratio) { diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index a3d987500aeb0..8d9376539d204 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -2308,6 +2308,29 @@ TEST_F(EdsAssignmentTimeoutTest, AssignmentLeaseExpired) { } } +// Validate that onConfigUpdate() with a config that contains both LEDS config +// source and explicit list of endpoints is rejected. +TEST_F(EdsTest, OnConfigUpdateLedsAndEndpoints) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + // Add an endpoint. + auto* endpoints = cluster_load_assignment.add_endpoints(); + auto* endpoint = endpoints->add_lb_endpoints(); + endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address("1.2.3.4"); + endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); + // Configure an LEDS data source. 
+ auto* leds_conf = endpoints->mutable_leds_cluster_locality_config(); + leds_conf->set_leds_collection_name("xdstp://foo/leds/collection"); + initialize(); + + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, + "A ClusterLoadAssignment for cluster fare cannot include both LEDS " + "(resource: xdstp://foo/leds/collection) and a list of endpoints."); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index cc5fa210e69b5..ba2dfe80fce2e 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -3135,14 +3135,17 @@ TEST(HttpStatusChecker, Default) { path: /healthcheck )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_TRUE(http_status_checker.inRange(200)); - EXPECT_FALSE(http_status_checker.inRange(204)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(200)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(204)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(200)); } -TEST(HttpStatusChecker, Single100) { +TEST(HttpStatusChecker, SingleExpected100) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3157,17 +3160,44 @@ TEST(HttpStatusChecker, Single100) { end: 101 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), 
conf.http_health_check().retriable_statuses(), + 200); - EXPECT_FALSE(http_status_checker.inRange(200)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(200)); - EXPECT_FALSE(http_status_checker.inRange(99)); - EXPECT_TRUE(http_status_checker.inRange(100)); - EXPECT_FALSE(http_status_checker.inRange(101)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(99)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(100)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(101)); } -TEST(HttpStatusChecker, Single599) { +TEST(HttpStatusChecker, SingleRetriable100) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 100 + end: 101 + )EOF"; + + auto conf = parseHealthCheckFromV3Yaml(yaml); + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); + + EXPECT_FALSE(http_status_checker.inRetriableRanges(99)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(100)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(101)); +} + +TEST(HttpStatusChecker, SingleExpected599) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3182,17 +3212,44 @@ TEST(HttpStatusChecker, Single599) { end: 600 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(200)); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(598)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(599)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(600)); +} + +TEST(HttpStatusChecker, SingleRetriable599) { + 
const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 599 + end: 600 + )EOF"; - EXPECT_FALSE(http_status_checker.inRange(200)); + auto conf = parseHealthCheckFromV3Yaml(yaml); + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_FALSE(http_status_checker.inRange(598)); - EXPECT_TRUE(http_status_checker.inRange(599)); - EXPECT_FALSE(http_status_checker.inRange(600)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(598)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(599)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(600)); } -TEST(HttpStatusChecker, Ranges_204_304) { +TEST(HttpStatusChecker, ExpectedRanges_204_304) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3209,20 +3266,52 @@ TEST(HttpStatusChecker, Ranges_204_304) { end: 305 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(200)); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(203)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(204)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(205)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(303)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(304)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(305)); +} + +TEST(HttpStatusChecker, RetriableRanges_304_404) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + 
service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 304 + end: 305 + - start: 404 + end: 405 + )EOF"; - EXPECT_FALSE(http_status_checker.inRange(200)); + auto conf = parseHealthCheckFromV3Yaml(yaml); + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_FALSE(http_status_checker.inRange(203)); - EXPECT_TRUE(http_status_checker.inRange(204)); - EXPECT_FALSE(http_status_checker.inRange(205)); - EXPECT_FALSE(http_status_checker.inRange(303)); - EXPECT_TRUE(http_status_checker.inRange(304)); - EXPECT_FALSE(http_status_checker.inRange(305)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(303)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(304)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(305)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(403)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(404)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(405)); } -TEST(HttpStatusChecker, Below100) { +TEST(HttpStatusChecker, ExpectedBelow100) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3237,13 +3326,40 @@ TEST(HttpStatusChecker, Below100) { end: 100 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); + EXPECT_THROW_WITH_MESSAGE( + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http expected status range: expecting start >= 100, but found start=99"); +} + +TEST(HttpStatusChecker, RetriableBelow100) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 99 + end: 100 + )EOF"; + + auto conf = parseHealthCheckFromV3Yaml(yaml); 
EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), - EnvoyException, "Invalid http status range: expecting start >= 100, but found start=99"); + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http retriable status range: expecting start >= 100, but found start=99"); } -TEST(HttpStatusChecker, Above599) { +TEST(HttpStatusChecker, ExpectedAbove599) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3258,13 +3374,16 @@ TEST(HttpStatusChecker, Above599) { end: 601 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), - EnvoyException, "Invalid http status range: expecting end <= 600, but found end=601"); + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http expected status range: expecting end <= 600, but found end=601"); } -TEST(HttpStatusChecker, InvalidRange) { +TEST(HttpStatusChecker, RetriableAbove599) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3274,19 +3393,21 @@ TEST(HttpStatusChecker, InvalidRange) { service_name_matcher: prefix: locations path: /healthchecka - expected_statuses: - - start: 200 - end: 200 + retriable_statuses: + - start: 600 + end: 601 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), EnvoyException, - "Invalid http status range: expecting start < end, but found start=200 and end=200"); + 
"Invalid http retriable status range: expecting end <= 600, but found end=601"); } -TEST(HttpStatusChecker, InvalidRange2) { +TEST(HttpStatusChecker, InvalidExpectedRange) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3297,15 +3418,41 @@ TEST(HttpStatusChecker, InvalidRange2) { prefix: locations path: /healthchecka expected_statuses: - - start: 201 + - start: 200 end: 200 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), EnvoyException, - "Invalid http status range: expecting start < end, but found start=201 and end=200"); + "Invalid http expected status range: expecting start < end, but found start=200 and end=200"); +} + +TEST(HttpStatusChecker, InvalidRetriableRange) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthchecka + retriable_statuses: + - start: 200 + end: 200 + )EOF"; + + auto conf = parseHealthCheckFromV3Yaml(yaml); + EXPECT_THROW_WITH_MESSAGE(HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http retriable status range: expecting start < end, but found " + "start=200 and end=200"); } TEST(TcpHealthCheckMatcher, loadJsonBytes) { diff --git a/test/common/upstream/least_request_load_balancer_fuzz_test.cc b/test/common/upstream/least_request_load_balancer_fuzz_test.cc index 85b0689f4d1ee..2bc4958d44e49 100644 --- a/test/common/upstream/least_request_load_balancer_fuzz_test.cc +++ b/test/common/upstream/least_request_load_balancer_fuzz_test.cc @@ -65,7 +65,7 @@ 
DEFINE_PROTO_FUZZER(const test::common::upstream::LeastRequestLoadBalancerTestCa zone_aware_load_balancer_fuzz.stats_, zone_aware_load_balancer_fuzz.runtime_, zone_aware_load_balancer_fuzz.random_, zone_aware_load_balancer_test_case.load_balancer_test_case().common_lb_config(), - input.least_request_lb_config()); + input.least_request_lb_config(), zone_aware_load_balancer_fuzz.simTime()); } catch (EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException; {}", e.what()); removeRequestsActiveForStaticHosts(zone_aware_load_balancer_fuzz.priority_set_); diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index 22f58f0f9337e..6a855bae9aacc 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -71,6 +71,7 @@ class BaseTester : public Event::TestUsingSimulatedTime { NiceMock runtime_; Random::RandomGeneratorImpl random_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; + envoy::config::cluster::v3::Cluster::RoundRobinLbConfig round_robin_lb_config_; std::shared_ptr info_{new NiceMock()}; }; @@ -81,7 +82,8 @@ class RoundRobinTester : public BaseTester { void initialize() { lb_ = std::make_unique(priority_set_, &local_priority_set_, stats_, - runtime_, random_, common_config_); + runtime_, random_, common_config_, + round_robin_lb_config_, simTime()); } std::unique_ptr lb_; @@ -92,9 +94,9 @@ class LeastRequestTester : public BaseTester { LeastRequestTester(uint64_t num_hosts, uint32_t choice_count) : BaseTester(num_hosts) { envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(choice_count); - lb_ = - std::make_unique(priority_set_, &local_priority_set_, stats_, - runtime_, random_, common_config_, lr_lb_config); + lb_ = std::make_unique(priority_set_, &local_priority_set_, stats_, + runtime_, random_, common_config_, + lr_lb_config, simTime()); } std::unique_ptr lb_; 
@@ -541,10 +543,10 @@ class SubsetLbTester : public BaseTester { *selector->mutable_keys()->Add() = std::string(metadata_key); subset_info_ = std::make_unique(subset_config); - lb_ = std::make_unique(LoadBalancerType::Random, priority_set_, - &local_priority_set_, stats_, stats_store_, runtime_, - random_, *subset_info_, absl::nullopt, absl::nullopt, - absl::nullopt, common_config_); + lb_ = std::make_unique( + LoadBalancerType::Random, priority_set_, &local_priority_set_, stats_, stats_store_, + runtime_, random_, *subset_info_, absl::nullopt, absl::nullopt, absl::nullopt, + absl::nullopt, common_config_, simTime()); const HostVector& hosts = priority_set_.getOrCreateHostSet(0).hosts(); ASSERT(hosts.size() == num_hosts); diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index dea39058ef38c..78e6d1f746918 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -32,6 +32,19 @@ using testing::ReturnRef; namespace Envoy { namespace Upstream { + +class EdfLoadBalancerBasePeer { +public: + static const std::chrono::milliseconds& slowStartWindow(EdfLoadBalancerBase& edf_lb) { + return edf_lb.slow_start_window_; + } + static double aggression(EdfLoadBalancerBase& edf_lb) { return edf_lb.aggression_; } + static const std::chrono::milliseconds latestHostAddedTime(EdfLoadBalancerBase& edf_lb) { + return std::chrono::time_point_cast(edf_lb.latest_host_added_time_) + .time_since_epoch(); + } +}; + namespace { static constexpr uint32_t UnhealthyStatus = 1u << static_cast(Host::Health::Unhealthy); @@ -62,6 +75,7 @@ class LoadBalancerTestBase : public Event::TestUsingSimulatedTime, std::shared_ptr info_{new NiceMock()}; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_; + envoy::config::cluster::v3::Cluster::RoundRobinLbConfig round_robin_lb_config_; }; class 
TestLb : public LoadBalancerBase { @@ -232,8 +246,8 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) { const auto hs = lb_.chooseHostSet(&context, 0); switch (hs.second) { case LoadBalancerBase::HostAvailability::Healthy: - // Either we selected one of the healthy hosts or we failed to select anything and defaulted - // to healthy. + // Either we selected one of the healthy hosts or we failed to select anything and + // defaulted to healthy. EXPECT_TRUE(!hs.first.healthyHosts().empty() || (hs.first.healthyHosts().empty() && hs.first.degradedHosts().empty())); break; @@ -319,7 +333,9 @@ TEST_P(LoadBalancerBaseTest, GentleFailover) { // Health P=0 == 100*1.4 == 35 P=1 == 35 // Since 3 hosts are excluded, P=0 should be considered fully healthy. // Total health = 100% + 35% is greater than 100%. Panic should not trigger. - updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, + updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts + */ + , 3 /* num_excluded_hosts */); updateHostSet(failover_host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */); ASSERT_THAT(getLoadPercentage(), ElementsAre(100, 0)); @@ -330,7 +346,9 @@ TEST_P(LoadBalancerBaseTest, GentleFailover) { // All priorities are in panic mode (situation called TotalPanic) // Load is distributed based on number of hosts regardless of their health status. // P=0 and P=1 have 4 hosts each so each priority will receive 50% of the traffic. 
- updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, + updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts + */ + , 4 /* num_excluded_hosts */); updateHostSet(failover_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */); ASSERT_THAT(getLoadPercentage(), ElementsAre(50, 50)); @@ -342,7 +360,9 @@ TEST_P(LoadBalancerBaseTest, GentleFailover) { // P=0 has 4 hosts with 1 excluded, P=1 has 6 hosts with 2 excluded. // P=0 should receive 4/(4+6)=40% of traffic // P=1 should receive 6/(4+6)=60% of traffic - updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, + updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts + */ + , 1 /* num_excluded_hosts */); updateHostSet(failover_host_set_, 6 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, 2 /* num_excluded_hosts */); @@ -646,7 +666,8 @@ class RoundRobinLoadBalancerTest : public LoadBalancerTestBase { local_priority_set_->getOrCreateHostSet(0); } lb_ = std::make_shared(priority_set_, local_priority_set_.get(), stats_, - runtime_, random_, common_config_); + runtime_, random_, common_config_, + round_robin_lb_config_, simTime()); } // Updates priority 0 with the given hosts and hosts_per_locality. @@ -1375,8 +1396,8 @@ TEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) { // The following host distribution with current precision should lead to the no_capacity_left // situation. - // Reuse the same host in all of the structures below to reduce time test takes and this does not - // impact load balancing logic. + // Reuse the same host in all of the structures below to reduce time test takes and this does + // not impact load balancing logic. 
HostSharedPtr host = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); HostVector current(45000); @@ -1555,10 +1576,302 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNoLocalLocality) { INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, RoundRobinLoadBalancerTest, ::testing::Values(true, false)); +TEST_P(RoundRobinLoadBalancerTest, SlowStartWithDefaultParams) { + init(false); + const auto slow_start_window = + EdfLoadBalancerBasePeer::slowStartWindow(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(0), slow_start_window); + const auto aggression = + EdfLoadBalancerBasePeer::aggression(static_cast(*lb_)); + EXPECT_EQ(1.0, aggression); + const auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(0), latest_host_added_time); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartNoWait) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(60); + simTime().advanceTimeWait(std::chrono::seconds(1)); + auto host1 = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); + host_set_.hosts_ = {host1}; + + init(true); + + // As no healthcheck is configured, hosts would enter slow start immediately. + HostVector empty; + HostVector hosts_added; + hosts_added.push_back(host1); + simTime().advanceTimeWait(std::chrono::seconds(5)); + hostSet().runCallbacks(hosts_added, empty); + auto latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time_ms); + + // Advance time, so that host is no longer in slow start. 
+ simTime().advanceTimeWait(std::chrono::seconds(56)); + + hosts_added.clear(); + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + + hosts_added.push_back(host2); + + hostSet().healthy_hosts_ = {host1, host2}; + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks(hosts_added, empty); + + latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(62000), latest_host_added_time_ms); + + // host2 is 12 secs in slow start, the weight is scaled with time factor 12 / 60 == 0.2. + simTime().advanceTimeWait(std::chrono::seconds(12)); + + // Recalculate weights. + hostSet().runCallbacks(empty, empty); + + // We expect 4:1 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.2 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + // host2 is 20 secs in slow start, the weight is scaled with time factor 20 / 60 == 0.33. + simTime().advanceTimeWait(std::chrono::seconds(8)); + + // Recalculate weights. + hostSet().runCallbacks(empty, empty); + + // We expect 2:1 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.33 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + // Advance time, so that there are no hosts in slow start. + simTime().advanceTimeWait(std::chrono::seconds(45)); + + // Recalculate weights. + hostSet().runCallbacks(empty, empty); + + // Now expect 1:1 ratio. 
+ EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartWaitForPassingHC) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(10); + simTime().advanceTimeWait(std::chrono::seconds(1)); + auto host1 = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + host_set_.hosts_ = {host1}; + + init(true); + + HostVector empty; + HostVector hosts_added; + hosts_added.push_back(host1); + simTime().advanceTimeWait(std::chrono::seconds(1)); + hostSet().runCallbacks(hosts_added, empty); + auto latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time_ms); + + simTime().advanceTimeWait(std::chrono::seconds(5)); + + hosts_added.clear(); + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + hosts_added.push_back(host2); + + hostSet().hosts_ = {host1, host2}; + hostSet().runCallbacks(hosts_added, empty); + + // As host1 has not passed first HC, it should not enter slow start mode. + latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(7000), latest_host_added_time_ms); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + host1->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC); + hostSet().healthy_hosts_ = {host1, host2}; + // Trigger callbacks to add host1 to slow start mode. + hostSet().runCallbacks({}, {}); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + // Trigger callbacks to remove host1 from slow start mode. 
+ hostSet().runCallbacks({}, {}); + simTime().advanceTimeWait(std::chrono::seconds(4)); + // Trigger callbacks to remove host1 from slow start mode. + hostSet().runCallbacks({}, {}); + + // We expect 3:1 ratio, as host2 is in slow start mode, its weight is scaled with time factor + // 5 / 10 == 0.5. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + // Advance time, so there are no hosts in slow start. + simTime().advanceTimeWait(std::chrono::seconds(20)); + hostSet().runCallbacks({}, {}); + + // We expect 1:1 ratio, as there are no hosts in slow start mode. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartWithRuntimeAggression) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(10); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_runtime_key( + "aggression"); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_default_value(1.0); + + init(true); + EXPECT_CALL(runtime_.snapshot_, getDouble("aggression", 1.0)).WillRepeatedly(Return(1.0)); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:100", simTime(), 1)}; + + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); + + simTime().advanceTimeWait(std::chrono::seconds(5)); + 
hostSet().healthy_hosts_[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + hostSet().runCallbacks({}, {}); + + auto latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time_ms); + + // We should see 2:1:1 ratio, as hosts 2 and 3 are in slow start, their weights are scaled with + // 0.5 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + simTime().advanceTimeWait(std::chrono::seconds(4)); + HostVector hosts_added; + auto host4 = makeTestHost(info_, "tcp://127.0.0.1:110", simTime()); + hostSet().hosts_.push_back(host4); + hostSet().healthy_hosts_.push_back(host4); + EXPECT_CALL(runtime_.snapshot_, getDouble("aggression", 1.0)).WillRepeatedly(Return(1.5)); + // Recompute edf schedulers. + hostSet().runCallbacks(hosts_added, {}); + + latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(10000), latest_host_added_time_ms); + + // We should see 1:1:1:0 ratio, as host 2 and 3 weight is scaled with (9/10)^(1/1.5)=0.93 factor, + // host4 weight is 0.002. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + + // host4 is 9 seconds in slow start, it's weight is scaled with (9/10)^(1/1.5)=0.93 factor. 
+ simTime().advanceTimeWait(std::chrono::seconds(9)); + hostSet().runCallbacks({}, {}); + + // We should see 1:1:1:1 ratio, only host4 is in slow start with weight 0.93, and the rest of + // hosts are outside of slow start with weight 1. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_->chooseHost(nullptr)); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartNoWaitNonLinearAggression) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(60); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_runtime_key( + "aggression"); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_default_value(2.0); + simTime().advanceTimeWait(std::chrono::seconds(1)); + + init(true); + + // As no healthcheck is configured, hosts would enter slow start immediately. + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + simTime().advanceTimeWait(std::chrono::seconds(5)); + // Host1 is 5 secs in slow start, its weight is scaled with (0.5/60)^(1/2)=0.28 factor. + hostSet().runCallbacks({}, {}); + + // Advance time, so that host1 is no longer in slow start. + simTime().advanceTimeWait(std::chrono::seconds(56)); + + HostVector hosts_added; + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + + hosts_added.push_back(host2); + + hostSet().healthy_hosts_.push_back(host2); + hostSet().hosts_ = hostSet().healthy_hosts_; + // host2 weight is scaled with 0.004 factor. + hostSet().runCallbacks(hosts_added, {}); + + // host2 is 6 secs in slow start. + simTime().advanceTimeWait(std::chrono::seconds(6)); + + // Recalculate weights. 
+ hostSet().runCallbacks({}, {}); + + // We expect 3:1 ratio, as host2 is 6 secs in slow start mode and it's weight is scaled with + // pow(0.1, 0.5)==0.31 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + // host2 is 26 secs in slow start. + simTime().advanceTimeWait(std::chrono::seconds(20)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + // We still expect 5:3 ratio, as host2 is in slow start mode and it's weight is scaled with + // pow(0.43, 0.5)==0.65 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + // Advance time, so that there are no hosts in slow start. + simTime().advanceTimeWait(std::chrono::seconds(41)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + // Now expect 1:1 ratio. 
+ EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); +} + class LeastRequestLoadBalancerTest : public LoadBalancerTestBase { public: LeastRequestLoadBalancer lb_{ - priority_set_, nullptr, stats_, runtime_, random_, common_config_, least_request_lb_config_}; + priority_set_, nullptr, stats_, runtime_, random_, common_config_, least_request_lb_config_, + simTime()}; }; TEST_P(LeastRequestLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } @@ -1635,11 +1948,11 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { // Creating various load balancer objects with different choice configs. envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(2); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; lr_lb_config.mutable_choice_count()->set_value(5); - LeastRequestLoadBalancer lb_5{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_5{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; // Verify correct number of choices. 
@@ -1715,8 +2028,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithInvalidActiveRequestBias envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(-1.0)); @@ -1769,8 +2082,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(0.0)); @@ -1815,6 +2128,197 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) { EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } +TEST_P(LeastRequestLoadBalancerTest, SlowStartWithDefaultParams) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + const auto slow_start_window = + EdfLoadBalancerBasePeer::slowStartWindow(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(0), slow_start_window); + const auto aggression = + EdfLoadBalancerBasePeer::aggression(static_cast(lb_2)); + EXPECT_EQ(1.0, aggression); + const 
auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(0), latest_host_added_time); +} + +TEST_P(LeastRequestLoadBalancerTest, SlowStartNoWait) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(60); + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + simTime().advanceTimeWait(std::chrono::seconds(1)); + + // As no healthcheck is configured, hosts would enter slow start immediately. + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + simTime().advanceTimeWait(std::chrono::seconds(5)); + // Host1 is 5 secs in slow start, its weight is scaled with (5/60)^1=0.08 factor. + hostSet().runCallbacks({}, {}); + + auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time); + + // Advance time, so that host is no longer in slow start. + simTime().advanceTimeWait(std::chrono::seconds(56)); + + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + hostSet().healthy_hosts_.push_back(host2); + hostSet().hosts_ = hostSet().healthy_hosts_; + HostVector hosts_added; + hosts_added.push_back(host2); + + hostSet().runCallbacks(hosts_added, {}); + + latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(62000), latest_host_added_time); + + // host2 is 20 secs in slow start, the weight is scaled with time factor 20 / 60 == 0.16. + simTime().advanceTimeWait(std::chrono::seconds(10)); + + // Recalculate weights. 
+ hostSet().runCallbacks({}, {}); + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(0); + + // We expect 3:1 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.16 factor and host1 weight with 0.5 factor (due to active request bias). + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + + // host2 is 50 secs in slow start, the weight is scaled with time factor 40 / 60 == 0.66. + simTime().advanceTimeWait(std::chrono::seconds(30)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + // We expect 4:3 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.66 factor and host1 weight with 0.5 factor. + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); +} + +TEST_P(LeastRequestLoadBalancerTest, SlowStartWaitForPassingHC) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(10); + lr_lb_config.mutable_slow_start_config()->mutable_aggression()->set_runtime_key("aggression"); + lr_lb_config.mutable_slow_start_config()->mutable_aggression()->set_default_value(0.9); + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(0.9); + + 
LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + + simTime().advanceTimeWait(std::chrono::seconds(1)); + auto host1 = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + host_set_.hosts_ = {host1}; + + HostVector hosts_added; + hosts_added.push_back(host1); + simTime().advanceTimeWait(std::chrono::seconds(1)); + hostSet().runCallbacks(hosts_added, {}); + + auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(0), latest_host_added_time); + + simTime().advanceTimeWait(std::chrono::seconds(5)); + + hosts_added.clear(); + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + hosts_added.push_back(host2); + + hostSet().healthy_hosts_ = {host1, host2}; + hostSet().hosts_ = hostSet().healthyHosts(); + hostSet().runCallbacks(hosts_added, {}); + + latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(7000), latest_host_added_time); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + host1->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC); + hostSet().healthy_hosts_ = {host1, host2}; + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(0); + + hostSet().healthy_hosts_ = {host1, host2}; + hostSet().hosts_ = hostSet().healthyHosts(); + + // Trigger callbacks to add host1 to slow start mode. + hostSet().runCallbacks({}, {}); + + // We expect 11:2 ratio, as host2 is in slow start mode, its weight is scaled with factor + // pow(0.1, 1.11)=0.07. Host1 is 7 seconds in slow start and its weight is scaled with active + // request and time bias 0.53 * pow(0.7, 1.11) = 0.36. 
+ + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + + simTime().advanceTimeWait(std::chrono::seconds(3)); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + // Trigger callbacks to remove host1 from slow start mode. + hostSet().runCallbacks({}, {}); + + // We expect 3:5 ratio, as host2 is 4 seconds in slow start, its weight is scaled with factor + // pow(0.4, 1.11)=0.36. Host1 is not in slow start and its weight is scaled with active + // request bias = 0.53. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + + // Host2 is 7 seconds in slow start, the weight is scaled with time factor 7 / 10 == 0.6. 
+ simTime().advanceTimeWait(std::chrono::seconds(3)); + + hostSet().runCallbacks({}, {}); + + // We expect 6:5 ratio, as host2 is in slow start mode, its weight is scaled with time factor + // pow(0.7, 1.11)=0.67. Host1 weight is scaled with active request bias = 0.53. + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); +} + INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, LeastRequestLoadBalancerTest, ::testing::Values(true, false)); diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index 28ded32dd029a..22d081562f1ad 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -74,11 +74,13 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { ClusterStats stats{ClusterInfoImpl::generateStats(stats_store, stat_names)}; stats.max_host_weight_.set(weight); NiceMock runtime; + auto time_source = std::make_unique>(); Random::RandomGeneratorImpl random; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config; LeastRequestLoadBalancer lb_{ - priority_set, nullptr, stats, runtime, random, common_config, least_request_lb_config}; + priority_set, nullptr, 
stats, runtime, random, common_config, least_request_lb_config, + *time_source}; absl::node_hash_map host_hits; const uint64_t total_requests = 100; diff --git a/test/common/upstream/maglev_lb_test.cc b/test/common/upstream/maglev_lb_test.cc index 562d4f0e90fa6..d8472c5a5aefe 100644 --- a/test/common/upstream/maglev_lb_test.cc +++ b/test/common/upstream/maglev_lb_test.cc @@ -103,6 +103,19 @@ TEST_F(MaglevLoadBalancerTest, SelectOverrideHost) { EXPECT_EQ(mock_host, lb_->factory()->create()->chooseHost(&context)); } +// Test for thread aware load balancer destructed before load balancer factory. After CDS removes a +// cluster, the operation does not immediately reach the worker thread. There may be cases where the +// thread aware load balancer is destructed, but the load balancer factory is still used in the +// worker thread. +TEST_F(MaglevLoadBalancerTest, LbDestructedBeforeFactory) { + init(7); + + auto factory = lb_->factory(); + lb_.reset(); + + EXPECT_NE(nullptr, factory->create()); +} + // Throws an exception if table size is not a prime number. TEST_F(MaglevLoadBalancerTest, NoPrimeNumber) { EXPECT_THROW_WITH_MESSAGE(init(8), EnvoyException, diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index c259f610c54af..9d5b2c4141eff 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -120,6 +120,19 @@ TEST_P(RingHashLoadBalancerTest, SelectOverrideHost) { EXPECT_EQ(mock_host, lb_->factory()->create()->chooseHost(&context)); } +// Test for thread aware load balancer destructed before load balancer factory. After CDS removes a +// cluster, the operation does not immediately reach the worker thread. There may be cases where the +// thread aware load balancer is destructed, but the load balancer factory is still used in the +// worker thread. 
+TEST_P(RingHashLoadBalancerTest, LbDestructedBeforeFactory) { + init(); + + auto factory = lb_->factory(); + lb_.reset(); + + EXPECT_NE(nullptr, factory->create()); +} + // Given minimum_ring_size > maximum_ring_size, expect an exception. TEST_P(RingHashLoadBalancerTest, BadRingSizeBounds) { config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig(); diff --git a/test/common/upstream/round_robin_load_balancer_fuzz.proto b/test/common/upstream/round_robin_load_balancer_fuzz.proto index a5ecf67ccc1c9..60da8d6437680 100644 --- a/test/common/upstream/round_robin_load_balancer_fuzz.proto +++ b/test/common/upstream/round_robin_load_balancer_fuzz.proto @@ -4,9 +4,11 @@ syntax = "proto3"; package test.common.upstream; import "validate/validate.proto"; +import "envoy/config/cluster/v3/cluster.proto"; import "test/common/upstream/zone_aware_load_balancer_fuzz.proto"; message RoundRobinLoadBalancerTestCase { test.common.upstream.ZoneAwareLoadBalancerTestCase zone_aware_load_balancer_test_case = 1 [(validate.rules).message.required = true]; + envoy.config.cluster.v3.Cluster.RoundRobinLbConfig round_robin_lb_config = 2; } diff --git a/test/common/upstream/round_robin_load_balancer_fuzz_test.cc b/test/common/upstream/round_robin_load_balancer_fuzz_test.cc index 4c1809a9a2237..75a456f44c87f 100644 --- a/test/common/upstream/round_robin_load_balancer_fuzz_test.cc +++ b/test/common/upstream/round_robin_load_balancer_fuzz_test.cc @@ -31,7 +31,8 @@ DEFINE_PROTO_FUZZER(const test::common::upstream::RoundRobinLoadBalancerTestCase zone_aware_load_balancer_fuzz.local_priority_set_.get(), zone_aware_load_balancer_fuzz.stats_, zone_aware_load_balancer_fuzz.runtime_, zone_aware_load_balancer_fuzz.random_, - zone_aware_load_balancer_test_case.load_balancer_test_case().common_lb_config()); + zone_aware_load_balancer_test_case.load_balancer_test_case().common_lb_config(), + input.round_robin_lb_config(), zone_aware_load_balancer_fuzz.simTime()); } catch (EnvoyException& e) { 
ENVOY_LOG_MISC(debug, "EnvoyException; {}", e.what()); return; diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 169202a448032..17952c30e5f78 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -201,7 +201,8 @@ class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, lb_ = std::make_shared( lb_type_, priority_set_, nullptr, stats_, *scope_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + ring_hash_lb_config_, maglev_lb_config_, round_robin_lb_config_, least_request_lb_config_, + common_config_, simTime()); } void zoneAwareInit(const std::vector& host_metadata_per_locality, @@ -248,10 +249,10 @@ class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, std::make_shared(), HostsPerLocalityImpl::empty()), {}, {}, {}, absl::nullopt); - lb_ = std::make_shared(lb_type_, priority_set_, &local_priority_set_, - stats_, *scope_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, - least_request_lb_config_, common_config_); + lb_ = std::make_shared( + lb_type_, priority_set_, &local_priority_set_, stats_, *scope_, runtime_, random_, + subset_info_, ring_hash_lb_config_, maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); } HostSharedPtr makeHost(const std::string& url, const HostMetadata& metadata) { @@ -475,6 +476,7 @@ class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, envoy::config::cluster::v3::Cluster::RingHashLbConfig ring_hash_lb_config_; envoy::config::cluster::v3::Cluster::MaglevLbConfig maglev_lb_config_; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_; + envoy::config::cluster::v3::Cluster::RoundRobinLbConfig round_robin_lb_config_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; NiceMock random_; @@ -1458,9 
+1460,10 @@ TEST_F(SubsetLoadBalancerTest, IgnoresHostsWithoutMetadata) { host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context_version({{"version", "1.0"}}); @@ -1877,9 +1880,10 @@ TEST_F(SubsetLoadBalancerTest, DisabledLocalityWeightAwareness) { }, host_set_, {1, 100}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.1"}}); @@ -1900,9 +1904,10 @@ TEST_F(SubsetLoadBalancerTest, DoesNotCheckHostHealth) { EXPECT_CALL(*mock_host, weight()).WillRepeatedly(Return(1)); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); } TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { @@ -1923,9 +1928,10 @@ 
TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { }, host_set_, {1, 100}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.1"}}); @@ -1958,9 +1964,10 @@ TEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeights) { }, host_set_, {50, 50}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.1"}}); // Since we scale the locality weights by number of hosts removed, we expect to see the second @@ -2003,9 +2010,10 @@ TEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeightsRounding) { }, host_set_, {2, 2}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.0"}}); // We expect to see a 33/66 split because 2 * 1 / 2 = 1 and 2 * 3 / 4 = 1.5 -> 2 @@ -2035,9 
+2043,10 @@ TEST_F(SubsetLoadBalancerTest, ScaleLocalityWeightsWithNoLocalityWeights) { }, host_set_); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); } TEST_P(SubsetLoadBalancerTest, GaugesUpdatedOnDestroy) { diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index 463f4fb2a9222..8bc91e7ac8772 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -115,6 +115,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { } Secret::SecretManager& secretManager() override { return secret_manager_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } MOCK_METHOD(ClusterManager*, clusterManagerFromProto_, (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 029b12bc33d8b..79efee3c96b90 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -2177,6 +2177,32 @@ TEST_F(StaticClusterImplTest, SourceAddressPriority) { } } +// LEDS is not supported with a static cluster at the moment. 
+TEST_F(StaticClusterImplTest, LedsUnsupported) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + leds_cluster_locality_config: + leds_collection_name: xdstp://foo/leds_collection_name + )EOF"; + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + EXPECT_THROW_WITH_MESSAGE( + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false), + EnvoyException, + "LEDS is only supported when EDS is used. Static cluster staticcluster cannot use LEDS."); +} + class ClusterImplTest : public testing::Test, public UpstreamImplTestBase {}; // Test that the correct feature() is set when close_connections_on_host_health_failure is @@ -3503,6 +3529,8 @@ TEST_F(ClusterInfoImplTest, Http3Auto) { http3_protocol_options: quic_protocol_options: max_concurrent_streams: 2 + alternate_protocols_cache_options: + name: default common_http_protocol_options: idle_timeout: 1s )EOF"; diff --git a/test/common/upstream/zone_aware_load_balancer_fuzz_base.h b/test/common/upstream/zone_aware_load_balancer_fuzz_base.h index be4a9ecb9a053..9e455027f312b 100644 --- a/test/common/upstream/zone_aware_load_balancer_fuzz_base.h +++ b/test/common/upstream/zone_aware_load_balancer_fuzz_base.h @@ -1,12 +1,14 @@ #pragma once #include "test/mocks/upstream/priority_set.h" +#include "test/test_common/simulated_time_system.h" #include "load_balancer_fuzz_base.h" namespace Envoy { namespace Upstream { -class ZoneAwareLoadBalancerFuzzBase : 
public LoadBalancerFuzzBase { +class ZoneAwareLoadBalancerFuzzBase : public Event::TestUsingSimulatedTime, + public LoadBalancerFuzzBase { public: ZoneAwareLoadBalancerFuzzBase(bool need_local_cluster, const std::string& random_bytestring) : random_bytestring_(random_bytestring) { diff --git a/test/common/watchdog/BUILD b/test/common/watchdog/BUILD index e1539697f6670..e5c085ccb660a 100644 --- a/test/common/watchdog/BUILD +++ b/test/common/watchdog/BUILD @@ -23,7 +23,7 @@ envoy_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) @@ -38,6 +38,6 @@ envoy_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) diff --git a/test/common/watchdog/abort_action_config_test.cc b/test/common/watchdog/abort_action_config_test.cc index f456687abc02d..5f9fad757f5f0 100644 --- a/test/common/watchdog/abort_action_config_test.cc +++ b/test/common/watchdog/abort_action_config_test.cc @@ -1,6 +1,6 @@ #include "envoy/registry/registry.h" #include "envoy/server/guarddog_config.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/watchdog/abort_action_config.h" @@ -29,7 +29,7 @@ TEST(AbortActionFactoryTest, CanCreateAction) { "name": "envoy.watchdog.abort_action", "typed_config": { "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", - "type_url": "type.googleapis.com/envoy.watchdog.abort_action.v3alpha.AbortActionConfig", + "type_url": "type.googleapis.com/envoy.watchdog.abort_action.v3.AbortActionConfig", "value": { "wait_duration": "2s", } diff --git a/test/common/watchdog/abort_action_test.cc b/test/common/watchdog/abort_action_test.cc index 
ebdbb8c870312..bfb83a6846c6e 100644 --- a/test/common/watchdog/abort_action_test.cc +++ b/test/common/watchdog/abort_action_test.cc @@ -6,7 +6,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/watchdog/abort_action.h" #include "source/common/watchdog/abort_action_config.h" @@ -21,7 +21,7 @@ namespace Envoy { namespace Watchdog { namespace { -using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; +using AbortActionConfig = envoy::watchdog::v3::AbortActionConfig; class AbortActionTest : public testing::Test { protected: diff --git a/test/config/BUILD b/test/config/BUILD index 9e5807cad1712..8c230a6adf076 100644 --- a/test/config/BUILD +++ b/test/config/BUILD @@ -34,11 +34,9 @@ envoy_cc_test_library( "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", - "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/upstreams/http/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", diff --git a/test/config/utility.cc b/test/config/utility.cc index 0928efd680b86..0e16fe2b5b428 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -6,11 +6,9 @@ #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" #include "envoy/config/route/v3/route_components.pb.h" -#include "envoy/config/tap/v3/common.pb.h" 
#include "envoy/extensions/access_loggers/file/v3/file.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" -#include "envoy/extensions/transport_sockets/tap/v3/tap.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/http/codec.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -406,9 +404,12 @@ std::string ConfigHelper::adsBootstrap(const std::string& api_type) { } // TODO(samflattery): bundle this up with buildCluster -envoy::config::cluster::v3::Cluster -ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::string& address) { - return TestUtility::parseYaml(fmt::format(R"EOF( +envoy::config::cluster::v3::Cluster ConfigHelper::buildStaticCluster(const std::string& name, + int port, + const std::string& address, + const std::string& lb_policy) { + return TestUtility::parseYaml( + fmt::format(R"EOF( name: {} connect_timeout: 5s type: STATIC @@ -421,15 +422,14 @@ ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::s socket_address: address: {} port_value: {} - lb_policy: ROUND_ROBIN + lb_policy: {} typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions explicit_http_config: http2_protocol_options: {{}} )EOF", - name, name, - address, port)); + name, name, address, port, lb_policy)); } envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string& name, @@ -508,6 +508,39 @@ ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::str return cluster_load_assignment; } +envoy::config::endpoint::v3::ClusterLoadAssignment +ConfigHelper::buildClusterLoadAssignmentWithLeds(const std::string& name, + const std::string& leds_collection_name) { + 
API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; + TestUtility::loadFromYaml(fmt::format(R"EOF( + cluster_name: {} + endpoints: + leds_cluster_locality_config: + leds_config: + resource_api_version: V3 + ads: {{}} + leds_collection_name: {} + )EOF", + name, leds_collection_name), + cluster_load_assignment); + return cluster_load_assignment; +} + +envoy::config::endpoint::v3::LbEndpoint ConfigHelper::buildLbEndpoint(const std::string& address, + uint32_t port) { + API_NO_BOOST(envoy::config::endpoint::v3::LbEndpoint) lb_endpoint; + TestUtility::loadFromYaml(fmt::format(R"EOF( + endpoint: + address: + socket_address: + address: {} + port_value: {} + )EOF", + address, port), + lb_endpoint); + return lb_endpoint; +} + envoy::config::listener::v3::Listener ConfigHelper::buildBaseListener(const std::string& name, const std::string& address, const std::string& filter_chains) { @@ -643,7 +676,7 @@ void ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml, void ConfigHelper::setConnectConfig( envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm, - bool terminate_connect, bool allow_post) { + bool terminate_connect, bool allow_post, bool http3) { auto* route_config = hcm.mutable_route_config(); ASSERT_EQ(1, route_config->virtual_hosts_size()); auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0); @@ -671,6 +704,9 @@ void ConfigHelper::setConnectConfig( hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); hcm.mutable_http2_protocol_options()->set_allow_connect(true); + if (http3) { + hcm.mutable_http3_protocol_options()->set_allow_extended_connect(true); + } } void ConfigHelper::applyConfigModifiers() { @@ -680,9 +716,11 @@ void ConfigHelper::applyConfigModifiers() { config_modifiers_.clear(); } -void ConfigHelper::configureUpstreamTls(bool use_alpn, bool http3, - bool use_alternate_protocols_cache) { - addConfigModifier([use_alpn, http3, 
use_alternate_protocols_cache]( +void ConfigHelper::configureUpstreamTls( + bool use_alpn, bool http3, + absl::optional + alternate_protocol_cache_config) { + addConfigModifier([use_alpn, http3, alternate_protocol_cache_config]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); @@ -713,10 +751,13 @@ void ConfigHelper::configureUpstreamTls(bool use_alpn, bool http3, new_protocol_options.mutable_auto_config()->mutable_http3_protocol_options()->MergeFrom( old_protocol_options.explicit_http_config().http3_protocol_options()); } - if (use_alternate_protocols_cache) { + if (alternate_protocol_cache_config.has_value()) { new_protocol_options.mutable_auto_config() ->mutable_alternate_protocols_cache_options() ->set_name("default_alternate_protocols_cache"); + new_protocol_options.mutable_auto_config() + ->mutable_alternate_protocols_cache_options() + ->CopyFrom(alternate_protocol_cache_config.value()); } (*cluster->mutable_typed_extension_protocol_options()) ["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] @@ -747,11 +788,6 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } -void ConfigHelper::enableDeprecatedV2Api() { - addRuntimeOverride("envoy.test_only.broken_in_production.enable_deprecated_v2_api", "true"); - addRuntimeOverride("envoy.features.enable_all_deprecated_features", "true"); -} - void ConfigHelper::setProtocolOptions(envoy::config::cluster::v3::Cluster& cluster, HttpProtocolOptions& protocol_options) { if (cluster.typed_extension_protocol_options().contains( @@ -783,22 +819,6 @@ void ConfigHelper::finalize(const std::vector& ports) { bool custom_cluster = false; bool original_dst_cluster = false; auto* static_resources = bootstrap_.mutable_static_resources(); - const auto tap_path = TestEnvironment::getOptionalEnvVar("TAP_PATH"); - if 
(tap_path) { - ENVOY_LOG_MISC(debug, "Test tap path set to {}", tap_path.value()); - } else { - ENVOY_LOG_MISC(debug, "No tap path set for tests"); - } - for (int i = 0; i < bootstrap_.mutable_static_resources()->listeners_size(); ++i) { - auto* listener = static_resources->mutable_listeners(i); - for (int j = 0; j < listener->filter_chains_size(); ++j) { - if (tap_path) { - auto* filter_chain = listener->mutable_filter_chains(j); - setTapTransportSocket(tap_path.value(), fmt::format("listener_{}_{}", i, j), - *filter_chain->mutable_transport_socket()); - } - } - } for (int i = 0; i < bootstrap_.mutable_static_resources()->clusters_size(); ++i) { auto* cluster = static_resources->mutable_clusters(i); if (cluster->type() == envoy::config::cluster::v3::Cluster::EDS) { @@ -828,11 +848,6 @@ void ConfigHelper::finalize(const std::vector& ports) { } } } - - if (tap_path) { - setTapTransportSocket(tap_path.value(), absl::StrCat("cluster_", i), - *cluster->mutable_transport_socket()); - } } ASSERT(skip_port_usage_validation_ || port_idx == ports.size() || eds_hosts || original_dst_cluster || custom_cluster || bootstrap_.dynamic_resources().has_cds_config()); @@ -851,39 +866,6 @@ void ConfigHelper::finalize(const std::vector& ports) { finalized_ = true; } -void ConfigHelper::setTapTransportSocket( - const std::string& tap_path, const std::string& type, - envoy::config::core::v3::TransportSocket& transport_socket) { - // Determine inner transport socket. - envoy::config::core::v3::TransportSocket inner_transport_socket; - if (!transport_socket.name().empty()) { - inner_transport_socket.MergeFrom(transport_socket); - } else { - inner_transport_socket.set_name("envoy.transport_sockets.raw_buffer"); - } - // Configure outer tap transport socket. 
- transport_socket.set_name("envoy.transport_sockets.tap"); - envoy::extensions::transport_sockets::tap::v3::Tap tap_config; - tap_config.mutable_common_config() - ->mutable_static_config() - ->mutable_match_config() - ->set_any_match(true); - auto* output_sink = tap_config.mutable_common_config() - ->mutable_static_config() - ->mutable_output_config() - ->mutable_sinks() - ->Add(); - output_sink->set_format(envoy::config::tap::v3::OutputSink::PROTO_TEXT); - const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); - const std::string test_id = - std::string(test_info->name()) + "_" + std::string(test_info->test_case_name()) + "_" + type; - output_sink->mutable_file_per_tap()->set_path_prefix(tap_path + "_" + - absl::StrReplaceAll(test_id, {{"/", "_"}})); - tap_config.mutable_transport_socket()->MergeFrom(inner_transport_socket); - transport_socket.mutable_typed_config()->PackFrom(tap_config); -} - void ConfigHelper::setSourceAddress(const std::string& address_string) { RELEASE_ASSERT(!finalized_, ""); bootstrap_.mutable_cluster_manager() diff --git a/test/config/utility.h b/test/config/utility.h index f421c95ba8c10..99ab62cbcc27e 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -146,8 +146,9 @@ class ConfigHelper { static std::string discoveredClustersBootstrap(const std::string& api_type); static std::string adsBootstrap(const std::string& api_type); // Builds a standard Cluster config fragment, with a single endpoint (at address:port). 
- static envoy::config::cluster::v3::Cluster buildStaticCluster(const std::string& name, int port, - const std::string& address); + static envoy::config::cluster::v3::Cluster + buildStaticCluster(const std::string& name, int port, const std::string& address, + const std::string& lb_policy = "ROUND_ROBIN"); // ADS configurations static envoy::config::cluster::v3::Cluster @@ -159,6 +160,13 @@ class ConfigHelper { static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(const std::string& name, const std::string& ip_version, uint32_t port); + static envoy::config::endpoint::v3::ClusterLoadAssignment + buildClusterLoadAssignmentWithLeds(const std::string& name, + const std::string& leds_collection_name); + + static envoy::config::endpoint::v3::LbEndpoint buildLbEndpoint(const std::string& address, + uint32_t port); + static envoy::config::listener::v3::Listener buildBaseListener(const std::string& name, const std::string& address, const std::string& filter_chains = ""); @@ -295,7 +303,8 @@ class ConfigHelper { // Configure Envoy to do TLS to upstream. void configureUpstreamTls(bool use_alpn = false, bool http3 = false, - bool use_alternate_protocols_cache = false); + absl::optional + alternate_protocol_cache_config = {}); // Skip validation that ensures that all upstream ports are referenced by the // configuration generated in ConfigHelper::finalize. @@ -304,16 +313,14 @@ class ConfigHelper { // Add this key value pair to the static runtime. void addRuntimeOverride(const std::string& key, const std::string& value); - // Enable deprecated v2 API resources via the runtime. - void enableDeprecatedV2Api(); - // Add filter_metadata to a cluster with the given name void addClusterFilterMetadata(absl::string_view metadata_yaml, absl::string_view cluster_name = "cluster_0"); // Given an HCM with the default config, set the matcher to be a connect matcher and enable // CONNECT requests. 
- static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect, bool allow_post); + static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect, bool allow_post, + bool http3 = false); void setLocalReply( const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& @@ -364,10 +371,6 @@ class ConfigHelper { // Finds the filter named 'name' from the first filter chain from the first listener. envoy::config::listener::v3::Filter* getFilterFromListener(const std::string& name); - // Configure a tap transport socket for a cluster/filter chain. - void setTapTransportSocket(const std::string& tap_path, const std::string& type, - envoy::config::core::v3::TransportSocket& transport_socket); - // The bootstrap proto Envoy will start up with. envoy::config::bootstrap::v3::Bootstrap bootstrap_; diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 876a501bd56ad..d6f8d800f9b66 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -27,6 +27,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AtLeast; using testing::Invoke; using testing::NiceMock; using testing::Return; @@ -96,6 +97,12 @@ class ConfigTest { return snapshot_; })); + // For configuration/example tests we don't fail if WIP APIs are used. 
+ EXPECT_CALL(server_.validation_context_.static_validation_visitor_, onWorkInProgress(_)) + .Times(AtLeast(0)); + EXPECT_CALL(server_.validation_context_.dynamic_validation_visitor_, onWorkInProgress(_)) + .Times(AtLeast(0)); + envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig( bootstrap, options_, server_.messageValidationContext().staticValidationVisitor(), *api_); diff --git a/test/dependencies/curl_test.cc b/test/dependencies/curl_test.cc index e046db65a95d4..6218a3dea66d7 100644 --- a/test/dependencies/curl_test.cc +++ b/test/dependencies/curl_test.cc @@ -27,15 +27,7 @@ TEST(CurlTest, BuiltWithExpectedFeatures) { EXPECT_NE(0, info->features & CURL_VERSION_HTTP2); EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI); EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5); -#ifndef WIN32 EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS); -#else - // TODO(wrowe): correct to expected, when curl 7.72 and later is patched - // or fixed upstream to include `afunix.h` in place of `sys/un.h` on recent - // Windows SDKs (it may be necessary to be more specific because older - // SDKs did not provide `afunix.h`) - EXPECT_EQ(0, info->features & CURL_VERSION_UNIX_SOCKETS); -#endif EXPECT_EQ(0, info->features & CURL_VERSION_PSL); EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY); EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL); diff --git a/test/extensions/access_loggers/common/grpc_access_logger_test.cc b/test/extensions/access_loggers/common/grpc_access_logger_test.cc index f2e125df17e06..ec6e35ab635e5 100644 --- a/test/extensions/access_loggers/common/grpc_access_logger_test.cc +++ b/test/extensions/access_loggers/common/grpc_access_logger_test.cc @@ -322,9 +322,9 @@ class MockGrpcAccessLoggerCache createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig&, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t 
max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) override { + Event::Dispatcher& dispatcher) override { return std::make_shared( - std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope, + std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope_, "mock_access_log_prefix.", mockMethodDescriptor()); } }; @@ -336,7 +336,7 @@ class GrpcAccessLoggerCacheTest : public testing::Test { void expectClientCreation() { factory_ = new Grpc::MockAsyncClientFactory; async_client_ = new Grpc::MockAsyncClient; - EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false)) + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { EXPECT_CALL(*factory_, createUncachedRawAsyncClient()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; @@ -354,38 +354,31 @@ class GrpcAccessLoggerCacheTest : public testing::Test { }; TEST_F(GrpcAccessLoggerCacheTest, Deduplication) { - Stats::IsolatedStoreImpl scope; - envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig config; config.set_log_name("log-1"); config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-1"); expectClientCreation(); MockGrpcAccessLoggerImpl::SharedPtr logger1 = - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope); - EXPECT_EQ(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP); + EXPECT_EQ(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); // Do not deduplicate different types of logger expectClientCreation(); - EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::TCP, scope)); + EXPECT_NE(logger1, logger_cache_.getOrCreateLogger(config, 
Common::GrpcAccessLoggerType::TCP)); // Changing log name leads to another logger. config.set_log_name("log-2"); expectClientCreation(); - EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + EXPECT_NE(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); config.set_log_name("log-1"); - EXPECT_EQ(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + EXPECT_EQ(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); // Changing cluster name leads to another logger. config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-2"); expectClientCreation(); - EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + EXPECT_NE(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); } } // namespace diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index 31616c4a41874..0484f14e7ddff 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -92,10 +92,22 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "tcp_config_test", + srcs = ["tcp_config_test.cc"], + extension_names = ["envoy.access_loggers.tcp_grpc"], + deps = [ + "//source/extensions/access_loggers/grpc:tcp_config", + "//test/mocks/server:factory_context_mocks", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", + ], +) + envoy_extension_cc_test( name = "tcp_grpc_access_log_integration_test", srcs = ["tcp_grpc_access_log_integration_test.cc"], - extension_names = ["envoy.access_loggers.http_grpc"], + extension_names = ["envoy.access_loggers.tcp_grpc"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", diff --git 
a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 3e5e4f58f9008..3ea77be37f64a 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -128,7 +128,7 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { : async_client_(new Grpc::MockAsyncClient), factory_(new Grpc::MockAsyncClientFactory), logger_cache_(async_client_manager_, scope_, tls_, local_info_), grpc_access_logger_impl_test_helper_(local_info_, async_client_) { - EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false)) + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { EXPECT_CALL(*factory_, createUncachedRawAsyncClient()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; @@ -156,7 +156,7 @@ TEST_F(GrpcAccessLoggerCacheImplTest, LoggerCreation) { config.mutable_buffer_size_bytes()->set_value(BUFFER_SIZE_BYTES); GrpcAccessLoggerSharedPtr logger = - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope_); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP); // Note that the local info node() method is mocked, so the node is not really configurable. 
grpc_access_logger_impl_test_helper_.expectStreamMessage(R"EOF( identifier: diff --git a/test/extensions/access_loggers/grpc/http_config_test.cc b/test/extensions/access_loggers/grpc/http_config_test.cc index c4d7db133a78a..933a4a69d967e 100644 --- a/test/extensions/access_loggers/grpc/http_config_test.cc +++ b/test/extensions/access_loggers/grpc/http_config_test.cc @@ -30,17 +30,37 @@ class HttpGrpcAccessLogConfigTest : public testing::Test { message_ = factory_->createEmptyConfigProto(); ASSERT_NE(nullptr, message_); + } - EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _)) - .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { - return std::make_unique>(); + void run(const std::string cluster_name) { + const auto good_cluster = "good_cluster"; + EXPECT_CALL(context_.cluster_manager_, checkActiveStaticCluster(cluster_name)) + .WillOnce(Invoke([good_cluster](const std::string& cluster_name) { + if (cluster_name != good_cluster) { + throw EnvoyException("fake"); + } })); auto* common_config = http_grpc_access_log_.mutable_common_config(); common_config->set_log_name("foo"); - common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("bar"); + common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(cluster_name); common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); TestUtility::jsonConvert(http_grpc_access_log_, *message_); + + if (cluster_name == good_cluster) { + EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::make_unique>(); + })); + AccessLog::InstanceSharedPtr instance = + factory_->createAccessLogInstance(*message_, std::move(filter_), context_); + EXPECT_NE(nullptr, instance); + EXPECT_NE(nullptr, dynamic_cast(instance.get())); + } else { + EXPECT_THROW_WITH_MESSAGE( 
+ factory_->createAccessLogInstance(*message_, std::move(filter_), context_), + EnvoyException, "fake"); + } } AccessLog::FilterPtr filter_; @@ -51,12 +71,10 @@ class HttpGrpcAccessLogConfigTest : public testing::Test { }; // Normal OK configuration. -TEST_F(HttpGrpcAccessLogConfigTest, Ok) { - AccessLog::InstanceSharedPtr instance = - factory_->createAccessLogInstance(*message_, std::move(filter_), context_); - EXPECT_NE(nullptr, instance); - EXPECT_NE(nullptr, dynamic_cast(instance.get())); -} +TEST_F(HttpGrpcAccessLogConfigTest, Ok) { run("good_cluster"); } + +// Wrong configuration with invalid clusters. +TEST_F(HttpGrpcAccessLogConfigTest, InvalidCluster) { run("invalid"); } } // namespace } // namespace HttpGrpc diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 8c78f302f5612..481c249c779c2 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -45,9 +45,39 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type)); }; +// Test for the issue described in https://github.com/envoyproxy/envoy/pull/18081 +TEST(HttpGrpcAccessLog, TlsLifetimeCheck) { + NiceMock tls; + Stats::IsolatedStoreImpl scope; + std::shared_ptr logger_cache{new MockGrpcAccessLoggerCache()}; + tls.defer_data_ = true; + { + AccessLog::MockFilter* filter{new NiceMock()}; + envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config; + config.mutable_common_config()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V3); + 
EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _)) + .WillOnce([](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& + common_config, + Common::GrpcAccessLoggerType type) { + // This is a part of the actual getOrCreateLogger code path and shouldn't crash. + std::make_pair(MessageUtil::hash(common_config), type); + return nullptr; + }); + // Set tls callback in the HttpGrpcAccessLog constructor, + // but it is not called yet since we have defer_data_ = true. + const auto access_log = std::make_unique(AccessLog::FilterPtr{filter}, + config, tls, logger_cache); + // Intentionally make access_log die earlier in this scope to simulate the situation where the + // creator has been deleted yet the tls callback is not called yet. + } + // Verify the tls callback does not crash since it captures the env with proper lifetime. + tls.call(); +} + class HttpGrpcAccessLogTest : public testing::Test { public: void init() { @@ -58,17 +88,17 @@ class HttpGrpcAccessLogTest : public testing::Test { config_.mutable_common_config()->add_filter_state_objects_to_log("serialized"); config_.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope&) { + Common::GrpcAccessLoggerType logger_type) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(Common::GrpcAccessLoggerType::HTTP, logger_type); return logger_; }); access_log_ = std::make_unique(AccessLog::FilterPtr{filter_}, config_, tls_, - logger_cache_, scope_); + logger_cache_); } void expectLog(const std::string& expected_log_entry_yaml) { diff --git a/test/extensions/access_loggers/grpc/tcp_config_test.cc b/test/extensions/access_loggers/grpc/tcp_config_test.cc new file 
mode 100644 index 0000000000000..b88f752d86093 --- /dev/null +++ b/test/extensions/access_loggers/grpc/tcp_config_test.cc @@ -0,0 +1,121 @@ +#include "envoy/config/core/v3/grpc_service.pb.h" +#include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/server/access_log_config.h" +#include "envoy/stats/scope.h" + +#include "source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h" + +#include "test/mocks/server/factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Invoke; + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace TcpGrpc { +namespace { + +class TcpGrpcAccessLogConfigTest : public testing::Test { +public: + void SetUp() override { + factory_ = + Registry::FactoryRegistry::getFactory( + "envoy.access_loggers.tcp_grpc"); + ASSERT_NE(nullptr, factory_); + + message_ = factory_->createEmptyConfigProto(); + ASSERT_NE(nullptr, message_); + } + + void run(const std::string cluster_name) { + const auto good_cluster = "good_cluster"; + EXPECT_CALL(context_.cluster_manager_, checkActiveStaticCluster(cluster_name)) + .WillOnce(Invoke([good_cluster](const std::string& cluster_name) { + if (cluster_name != good_cluster) { + throw EnvoyException("fake"); + } + })); + + auto* common_config = tcp_grpc_access_log_.mutable_common_config(); + common_config->set_log_name("foo"); + common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(cluster_name); + common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + TestUtility::jsonConvert(tcp_grpc_access_log_, *message_); + + if (cluster_name == good_cluster) { + EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::make_unique>(); + })); + AccessLog::InstanceSharedPtr instance = + 
factory_->createAccessLogInstance(*message_, std::move(filter_), context_); + EXPECT_NE(nullptr, instance); + EXPECT_NE(nullptr, dynamic_cast(instance.get())); + } else { + EXPECT_THROW_WITH_MESSAGE( + factory_->createAccessLogInstance(*message_, std::move(filter_), context_), + EnvoyException, "fake"); + } + } + + AccessLog::FilterPtr filter_; + NiceMock context_; + envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig tcp_grpc_access_log_; + ProtobufTypes::MessagePtr message_; + Server::Configuration::AccessLogInstanceFactory* factory_{}; +}; + +// Normal OK configuration. +TEST_F(TcpGrpcAccessLogConfigTest, Ok) { run("good_cluster"); } + +// Wrong configuration with invalid clusters. +TEST_F(TcpGrpcAccessLogConfigTest, InvalidCluster) { run("invalid"); } + +class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { +public: + // GrpcAccessLoggerCache + MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, + (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, + Common::GrpcAccessLoggerType logger_type)); +}; + +// Test for the issue described in https://github.com/envoyproxy/envoy/pull/18081 +TEST(TcpGrpcAccessLog, TlsLifetimeCheck) { + NiceMock tls; + Stats::IsolatedStoreImpl scope; + std::shared_ptr logger_cache{new MockGrpcAccessLoggerCache()}; + tls.defer_data_ = true; + { + AccessLog::MockFilter* filter{new NiceMock()}; + envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config; + config.mutable_common_config()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V3); + EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _)) + .WillOnce([](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& + common_config, + Common::GrpcAccessLoggerType type) { + // This is a part of the actual getOrCreateLogger code path and shouldn't crash. 
+ std::make_pair(MessageUtil::hash(common_config), type); + return nullptr; + }); + // Set tls callback in the TcpGrpcAccessLog constructor, + // but it is not called yet since we have defer_data_ = true. + const auto access_log = + std::make_unique(AccessLog::FilterPtr{filter}, config, tls, logger_cache); + // Intentionally make access_log die earlier in this scope to simulate the situation where the + // creator has been deleted yet the tls callback is not called yet. + } + // Verify the tls callback does not crash since it captures the env with proper lifetime. + tls.call(); +} + +} // namespace +} // namespace TcpGrpc +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/access_loggers/open_telemetry/BUILD b/test/extensions/access_loggers/open_telemetry/BUILD index e51a9458ef864..d77fbfb8e09c2 100644 --- a/test/extensions/access_loggers/open_telemetry/BUILD +++ b/test/extensions/access_loggers/open_telemetry/BUILD @@ -58,7 +58,7 @@ envoy_extension_cc_test( "//source/extensions/access_loggers/open_telemetry:config", "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", ], ) @@ -76,7 +76,7 @@ envoy_extension_cc_test( "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@opentelemetry_proto//:logs_cc_proto", ], diff --git a/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc 
b/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc index f815e3a3bd981..8ddbe5b1d91b4 100644 --- a/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc @@ -52,7 +52,7 @@ class MockGrpcAccessLoggerCache : public GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type)); }; class AccessLogTest : public testing::Test { @@ -82,17 +82,16 @@ string_value: "x-request-header: %REQ(x-request-header)%, protocol: %PROTOCOL%" config_.mutable_common_config()->set_log_name("test_log"); config_.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope&) { + Common::GrpcAccessLoggerType logger_type) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(Common::GrpcAccessLoggerType::HTTP, logger_type); return logger_; }); - access_log_ = - std::make_unique(FilterPtr{filter_}, config_, tls_, logger_cache_, scope_); + access_log_ = std::make_unique(FilterPtr{filter_}, config_, tls_, logger_cache_); } void expectLog(const std::string& expected_log_entry_yaml) { @@ -111,7 +110,7 @@ string_value: "x-request-header: %REQ(x-request-header)%, protocol: %PROTOCOL%" Stats::IsolatedStoreImpl scope_; MockFilter* filter_{new NiceMock()}; NiceMock tls_; - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig config_; + 
envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config_; std::shared_ptr logger_{new MockGrpcAccessLogger()}; std::shared_ptr logger_cache_{new MockGrpcAccessLoggerCache()}; AccessLogPtr access_log_; diff --git a/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc b/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc index ce52155a6dc96..4d0a16548516c 100644 --- a/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc +++ b/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc @@ -1,6 +1,6 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "source/common/buffer/zero_copy_input_stream_impl.h" @@ -71,7 +71,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, auto* access_log = hcm.add_access_log(); access_log->set_name("grpc_accesslog"); - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config; auto* common_config = config.mutable_common_config(); common_config->set_log_name("foo"); diff --git a/test/extensions/access_loggers/open_telemetry/config_test.cc b/test/extensions/access_loggers/open_telemetry/config_test.cc index 35017d2831544..30db7a23d9475 100644 --- a/test/extensions/access_loggers/open_telemetry/config_test.cc +++ b/test/extensions/access_loggers/open_telemetry/config_test.cc @@ -1,5 +1,5 @@ #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" 
+#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/registry/registry.h" #include "envoy/server/access_log_config.h" #include "envoy/stats/scope.h" @@ -46,7 +46,7 @@ class OpenTelemetryAccessLogConfigTest : public testing::Test { ::Envoy::AccessLog::FilterPtr filter_; NiceMock context_; - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig access_log_config_; ProtobufTypes::MessagePtr message_; Server::Configuration::AccessLogInstanceFactory* factory_{}; diff --git a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc index 850ae1dfa4cdf..b40e82c47236c 100644 --- a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc @@ -151,7 +151,7 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { : async_client_(new Grpc::MockAsyncClient), factory_(new Grpc::MockAsyncClientFactory), logger_cache_(async_client_manager_, scope_, tls_, local_info_), grpc_access_logger_impl_test_helper_(local_info_, async_client_) { - EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false)) + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { EXPECT_CALL(*factory_, createUncachedRawAsyncClient()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; @@ -179,7 +179,7 @@ TEST_F(GrpcAccessLoggerCacheImplTest, LoggerCreation) { config.mutable_buffer_size_bytes()->set_value(BUFFER_SIZE_BYTES); GrpcAccessLoggerSharedPtr logger = - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope_); + logger_cache_.getOrCreateLogger(config, 
Common::GrpcAccessLoggerType::HTTP); grpc_access_logger_impl_test_helper_.expectStreamMessage(R"EOF( resource_logs: resource: diff --git a/test/extensions/access_loggers/wasm/config_test.cc b/test/extensions/access_loggers/wasm/config_test.cc index 690834d707a84..f403bddf3ee73 100644 --- a/test/extensions/access_loggers/wasm/config_test.cc +++ b/test/extensions/access_loggers/wasm/config_test.cc @@ -92,6 +92,9 @@ TEST_P(WasmAccessLogConfigTest, CreateWasmFromWASM) { factory->createAccessLogInstance(config, std::move(filter), context); EXPECT_NE(nullptr, instance); EXPECT_NE(nullptr, dynamic_cast(instance.get())); + // Check if the custom stat namespace is registered during the initialization. + EXPECT_TRUE(api->customStatNamespaces().registered("wasmcustom")); + Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; diff --git a/test/extensions/bootstrap/wasm/config_test.cc b/test/extensions/bootstrap/wasm/config_test.cc index 2995eedc00ebe..bacb5dc3d2cfe 100644 --- a/test/extensions/bootstrap/wasm/config_test.cc +++ b/test/extensions/bootstrap/wasm/config_test.cc @@ -84,6 +84,9 @@ TEST_P(WasmFactoryTest, CreateWasmFromWasm) { initializeWithConfig(config_); EXPECT_NE(extension_, nullptr); + + // Check if the custom stat namespace is registered during the initialization. 
+ EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); } TEST_P(WasmFactoryTest, CreateWasmFromWasmPerThread) { diff --git a/test/extensions/bootstrap/wasm/wasm_test.cc b/test/extensions/bootstrap/wasm/wasm_test.cc index f073c62893227..9286aec3d8b79 100644 --- a/test/extensions/bootstrap/wasm/wasm_test.cc +++ b/test/extensions/bootstrap/wasm/wasm_test.cc @@ -285,8 +285,10 @@ TEST_P(WasmNullTest, Stats) { EXPECT_CALL(*context, log_(spdlog::level::err, Eq("get histogram = Unsupported"))); EXPECT_TRUE(wasm_->configure(context, plugin_)); - EXPECT_EQ(scope_->counterFromString("test_counter").value(), 5); - EXPECT_EQ(scope_->gaugeFromString("test_gauge", Stats::Gauge::ImportMode::Accumulate).value(), 2); + EXPECT_EQ(scope_->counterFromString("wasmcustom.test_counter").value(), 5); + EXPECT_EQ(scope_->gaugeFromString("wasmcustom.test_gauge", Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); } TEST_P(WasmNullTest, StatsHigherLevel) { @@ -312,11 +314,12 @@ TEST_P(WasmNullTest, StatsHigherLevel) { wasm_->setTimerPeriod(1, std::chrono::milliseconds(10)); wasm_->tickHandler(1); - EXPECT_EQ(scope_->counterFromString("counter_tag.test_tag.test_counter").value(), 5); - EXPECT_EQ( - scope_->gaugeFromString("gauge_int_tag.9.test_gauge", Stats::Gauge::ImportMode::Accumulate) - .value(), - 2); + EXPECT_EQ(scope_->counterFromString("wasmcustom.counter_tag.test_tag.test_counter").value(), 5); + EXPECT_EQ(scope_ + ->gaugeFromString("wasmcustom.gauge_int_tag.9.test_gauge", + Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); } TEST_P(WasmNullTest, StatsHighLevel) { @@ -346,13 +349,16 @@ TEST_P(WasmNullTest, StatsHighLevel) { // EXPECT_CALL(*context, log_(spdlog::level::err, Eq("stack_h = 3"))); context->onLog(); EXPECT_EQ( - scope_->counterFromString("string_tag.test_tag.int_tag.7.bool_tag.true.test_counter").value(), + scope_ + ->counterFromString("wasmcustom.string_tag.test_tag.int_tag.7.bool_tag.true.test_counter") + .value(), 5); - EXPECT_EQ(scope_ - 
->gaugeFromString("string_tag1.test_tag1.string_tag2.test_tag2.test_gauge", - Stats::Gauge::ImportMode::Accumulate) - .value(), - 2); + EXPECT_EQ( + scope_ + ->gaugeFromString("wasmcustom.string_tag1.test_tag1.string_tag2.test_tag2.test_gauge", + Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); } } // namespace Wasm diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 960c8d1f9338c..346d47b0c3446 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -149,7 +149,7 @@ class RedisClusterIntegrationTest : public testing::TestWithParam(dispatcher_, tls_, random_, filesystem_, loader_, - store_, validation_visitor_, config_); + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); + dns_cache_ = std::make_unique(context_, config_); update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_); } ~DnsCacheImplTest() override { dns_cache_.reset(); - EXPECT_EQ(0, TestUtility::findGauge(store_, "dns_cache.foo.num_hosts")->value()); + EXPECT_EQ(0, TestUtility::findGauge(context_.scope_, "dns_cache.foo.num_hosts")->value()); } void checkStats(uint64_t query_attempt, uint64_t query_success, uint64_t query_failure, uint64_t address_changed, uint64_t added, uint64_t removed, uint64_t num_hosts) { const auto counter_value = [this](const std::string& name) { - return TestUtility::findCounter(store_, "dns_cache.foo." + name)->value(); + return TestUtility::findCounter(context_.scope_, "dns_cache.foo." 
+ name)->value(); }; EXPECT_EQ(query_attempt, counter_value("dns_query_attempt")); @@ -69,21 +71,16 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT EXPECT_EQ(address_changed, counter_value("host_address_changed")); EXPECT_EQ(added, counter_value("host_added")); EXPECT_EQ(removed, counter_value("host_removed")); - EXPECT_EQ(num_hosts, TestUtility::findGauge(store_, "dns_cache.foo.num_hosts")->value()); + EXPECT_EQ(num_hosts, + TestUtility::findGauge(context_.scope_, "dns_cache.foo.num_hosts")->value()); } + NiceMock context_; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config_; - NiceMock dispatcher_; std::shared_ptr resolver_{std::make_shared()}; - NiceMock tls_; - NiceMock random_; - NiceMock filesystem_; - NiceMock loader_; - Stats::IsolatedStoreImpl store_; std::unique_ptr dns_cache_; MockUpdateCallbacks update_callbacks_; DnsCache::AddUpdateCallbacksHandlePtr update_callbacks_handle_; - Envoy::ProtobufMessage::MockValidationVisitor validation_visitor_; }; MATCHER_P3(DnsHostInfoEquals, address, resolved_host, is_ip_address, "") { @@ -148,8 +145,8 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -212,6 +209,54 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { 1 /* added */, 0 /* removed */, 1 /* num hosts */); } +// Verify the force refresh API works as expected. 
+TEST_F(DnsCacheImplTest, ForceRefresh) { + initialize(); + InSequence s; + + // No hosts so should not do anything. + dns_cache_->forceRefreshHosts(); + checkStats(0 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 0 /* added */, 0 /* removed */, 0 /* num hosts */); + + MockLoadDnsCacheEntryCallbacks callbacks; + Network::DnsResolver::ResolveCb resolve_cb; + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + auto result = dns_cache_->loadDnsCacheEntry("foo.com", 80, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_); + EXPECT_NE(result.handle_, nullptr); + EXPECT_EQ(absl::nullopt, result.host_info_); + + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Query in progress so should do nothing. 
+ dns_cache_->forceRefreshHosts(); + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(update_callbacks_, + onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(callbacks, + onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.1"})); + + checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Should force a refresh. Ignore strict mock failures on the enabled() call. + EXPECT_CALL(*timeout_timer, enabled()).Times(AtLeast(0)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(0), _)); + dns_cache_->forceRefreshHosts(); +} + // Ipv4 address. 
TEST_F(DnsCacheImplTest, Ipv4Address) { initialize(); @@ -219,8 +264,8 @@ TEST_F(DnsCacheImplTest, Ipv4Address) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("127.0.0.1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -247,8 +292,8 @@ TEST_F(DnsCacheImplTest, Ipv4AddressWithPort) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("127.0.0.1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -275,8 +320,8 @@ TEST_F(DnsCacheImplTest, Ipv6Address) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("::1", _, _)) 
.WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -301,8 +346,8 @@ TEST_F(DnsCacheImplTest, Ipv6AddressWithPort) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("::1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -327,8 +372,8 @@ TEST_F(DnsCacheImplTest, TTL) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -377,8 +422,8 @@ TEST_F(DnsCacheImplTest, TTL) { 1 /* added */, 1 /* removed */, 0 /* num hosts */); // Make sure we don't get a cache hit the next time the host is requested. 
- resolve_timer = new Event::MockTimer(&dispatcher_); - timeout_timer = new Event::MockTimer(&dispatcher_); + new Event::MockTimer(&context_.dispatcher_); // resolve_timer + timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -400,8 +445,8 @@ TEST_F(DnsCacheImplTest, TTLWithCustomParameters) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(1000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -443,14 +488,14 @@ TEST_F(DnsCacheImplTest, InlineResolve) { MockLoadDnsCacheEntryCallbacks callbacks; Event::PostCb post_cb; - EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); + EXPECT_CALL(context_.dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); auto result = dns_cache_->loadDnsCacheEntry("localhost", 80, callbacks); EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_); EXPECT_NE(result.handle_, nullptr); EXPECT_EQ(absl::nullopt, result.host_info_); - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, 
enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("localhost", _, _)) .WillOnce(Invoke([](const std::string&, Network::DnsLookupFamily, @@ -476,8 +521,8 @@ TEST_F(DnsCacheImplTest, ResolveTimeout) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -496,7 +541,8 @@ TEST_F(DnsCacheImplTest, ResolveTimeout) { timeout_timer->invokeCallback(); checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); - EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.dns_query_timeout")->value()); + EXPECT_EQ(1, + TestUtility::findCounter(context_.scope_, "dns_cache.foo.dns_query_timeout")->value()); } // Resolve failure that returns no addresses. 
@@ -506,8 +552,8 @@ TEST_F(DnsCacheImplTest, ResolveFailure) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -555,8 +601,8 @@ TEST_F(DnsCacheImplTest, ResolveFailureWithFailureRefreshRate) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -570,7 +616,7 @@ TEST_F(DnsCacheImplTest, ResolveFailureWithFailureRefreshRate) { EXPECT_CALL(*timeout_timer, disableTimer()); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoAddressIsNull())); - ON_CALL(random_, random()).WillByDefault(Return(8000)); + ON_CALL(context_.api_.random_, random()).WillByDefault(Return(8000)); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(1000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Failure, TestUtility::makeDnsResponse({})); checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 
/* address changed */, @@ -601,8 +647,8 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithEmptyResult) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -824,7 +870,7 @@ TEST_F(DnsCacheImplTest, MaxHostOverflow) { EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Overflow, result.status_); EXPECT_EQ(result.handle_, nullptr); EXPECT_EQ(absl::nullopt, result.host_info_); - EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.host_overflow")->value()); + EXPECT_EQ(1, TestUtility::findCounter(context_.scope_, "dns_cache.foo.host_overflow")->value()); } TEST_F(DnsCacheImplTest, CircuitBreakersNotInvoked) { @@ -840,17 +886,18 @@ TEST_F(DnsCacheImplTest, DnsCacheCircuitBreakersOverflow) { auto raii_ptr = dns_cache_->canCreateDnsRequest(); EXPECT_EQ(raii_ptr.get(), nullptr); - EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); + EXPECT_EQ( + 1, + TestUtility::findCounter(context_.scope_, "dns_cache.foo.dns_rq_pending_overflow")->value()); } TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSetDeprecatedField) { initialize(); config_.set_use_tcp_for_dns_lookups(true); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, 
loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -861,10 +908,9 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSet) { ->mutable_dns_resolver_options() ->set_use_tcp_for_dns_lookups(true); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -875,10 +921,9 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { ->mutable_dns_resolver_options() ->set_no_default_search_domain(true); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `true` here means dns_resolver_options.no_default_search_domain is set to true. 
EXPECT_EQ(true, dns_resolver_options.no_default_search_domain()); } @@ -886,10 +931,9 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionUnSet) { initialize(); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `false` here means dns_resolver_options.use_tcp_for_dns_lookups is set to false. EXPECT_EQ(false, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -897,24 +941,17 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionUnSet) { TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionUnSet) { initialize(); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `false` here means dns_resolver_options.no_default_search_domain is set to false. EXPECT_EQ(false, dns_resolver_options.no_default_search_domain()); } // DNS cache manager config tests. 
TEST(DnsCacheManagerImplTest, LoadViaConfig) { - NiceMock dispatcher; - NiceMock tls; - NiceMock random; - NiceMock loader; - Stats::IsolatedStoreImpl store; - NiceMock filesystem; - Envoy::ProtobufMessage::MockValidationVisitor visitor; - DnsCacheManagerImpl cache_manager(dispatcher, tls, random, filesystem, loader, store, visitor); + NiceMock context; + DnsCacheManagerImpl cache_manager(context); envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; config1.set_name("foo"); @@ -939,31 +976,38 @@ TEST(DnsCacheManagerImplTest, LoadViaConfig) { "config specified DNS cache 'foo' with different settings"); } +TEST(DnsCacheManagerImplTest, LookupByName) { + NiceMock context; + DnsCacheManagerImpl cache_manager(context); + + EXPECT_EQ(cache_manager.lookUpCacheByName("foo"), nullptr); + + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; + config1.set_name("foo"); + + auto cache1 = cache_manager.getCache(config1); + EXPECT_NE(cache1, nullptr); + + auto cache2 = cache_manager.lookUpCacheByName("foo"); + EXPECT_NE(cache2, nullptr); + EXPECT_EQ(cache1, cache2); +} + TEST(DnsCacheConfigOptionsTest, EmtpyDnsResolutionConfig) { - NiceMock dispatcher; + NiceMock context; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; std::shared_ptr resolver{std::make_shared()}; - NiceMock tls; - NiceMock random; - NiceMock loader; - Stats::IsolatedStoreImpl store; - envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; std::vector expected_empty_dns_resolvers; - EXPECT_CALL(dispatcher, createDnsResolver(expected_empty_dns_resolvers, _)) + EXPECT_CALL(context.dispatcher_, createDnsResolver(expected_empty_dns_resolvers, _)) .WillOnce(Return(resolver)); - NiceMock filesystem; - Envoy::ProtobufMessage::MockValidationVisitor visitor; - DnsCacheImpl dns_cache(dispatcher, tls, random, filesystem, loader, store, visitor, config); + DnsCacheImpl dns_cache_(context, config); } 
TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { - NiceMock dispatcher; - std::shared_ptr resolver{std::make_shared()}; - NiceMock tls; - NiceMock random; - NiceMock loader; - Stats::IsolatedStoreImpl store; + NiceMock context; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + std::shared_ptr resolver{std::make_shared()}; envoy::config::core::v3::Address* dns_resolvers = config.mutable_dns_resolution_config()->add_resolvers(); @@ -972,12 +1016,10 @@ TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { std::vector expected_dns_resolvers; expected_dns_resolvers.push_back(Network::Address::resolveProtoAddress(*dns_resolvers)); - EXPECT_CALL(dispatcher, + EXPECT_CALL(context.dispatcher_, createDnsResolver(CustomDnsResolversSizeEquals(expected_dns_resolvers), _)) .WillOnce(Return(resolver)); - NiceMock filesystem; - Envoy::ProtobufMessage::MockValidationVisitor visitor; - DnsCacheImpl dns_cache_(dispatcher, tls, random, filesystem, loader, store, visitor, config); + DnsCacheImpl dns_cache_(context, config); } // Note: this test is done here, rather than a TYPED_TEST_SUITE in @@ -1025,6 +1067,9 @@ TEST(UtilityTest, PrepareDnsRefreshStrategy) { } TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { + auto* time_source = new NiceMock(); + context_.dispatcher_.time_system_.reset(time_source); + // Configure the cache. 
MockKeyValueStoreFactory factory; EXPECT_CALL(factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { @@ -1049,8 +1094,8 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -1064,14 +1109,14 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { EXPECT_CALL(*timeout_timer, disableTimer()); // Make sure the store gets the first insert. - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80")); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|30|0")); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"})); + TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(30))); checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -1087,9 +1132,10 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { // Address does not change. 
EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|30|0")); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"})); + TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(30))); checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -1105,15 +1151,46 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { EXPECT_CALL(*timeout_timer, disableTimer()); // Make sure the store gets the updated address. - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80")); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.2:80", "foo.com", false))); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|30|0")); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.2"})); + TestUtility::makeDnsResponse({"10.0.0.2"}, std::chrono::seconds(30))); checkStats(3 /* attempt */, 3 /* success */, 0 /* failure */, 2 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Now do one more resolve, where the address does not change but the time + // does. + + // Re-resolve timer. + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + resolve_timer->invokeCallback(); + + // Address does not change. 
+ EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|40|0")); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.2"}, std::chrono::seconds(40))); +} + +// Make sure the cache manager can handle the context going out of scope. +TEST(DnsCacheManagerImplTest, TestLifetime) { + NiceMock context; + std::unique_ptr cache_manager; + + { + Server::FactoryContextBaseImpl scoped_context(context); + cache_manager = std::make_unique(scoped_context); + } + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; + config1.set_name("foo"); + + EXPECT_TRUE(cache_manager->getCache(config1) != nullptr); } } // namespace diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index d65583e21c596..3765190ab29f7 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -59,6 +59,7 @@ class MockDnsCache : public DnsCache { MOCK_METHOD((void), iterateHostMap, (IterateHostMapCb)); MOCK_METHOD((absl::optional), getHost, (absl::string_view)); MOCK_METHOD(Upstream::ResourceAutoIncDec*, canCreateDnsRequest_, ()); + MOCK_METHOD(void, forceRefreshHosts, ()); }; class MockLoadDnsCacheEntryHandle : public DnsCache::LoadDnsCacheEntryHandle { @@ -76,6 +77,7 @@ class MockDnsCacheManager : public DnsCacheManager { MOCK_METHOD(DnsCacheSharedPtr, getCache, (const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config)); + MOCK_METHOD(DnsCacheSharedPtr, lookUpCacheByName, (absl::string_view cache_name)); std::shared_ptr> dns_cache_{new NiceMock()}; }; diff --git a/test/extensions/common/utility_test.cc b/test/extensions/common/utility_test.cc index 7a61dc4863028..0c22d1187bff8 100644 --- a/test/extensions/common/utility_test.cc +++ 
b/test/extensions/common/utility_test.cc @@ -19,9 +19,9 @@ namespace { // Test that deprecated names indicate warning or block depending on runtime flags. TEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestDeprecatedExtensionNameStatus)) { - // Validate that no runtime available results in warnings. + // Validate that no runtime available results in block. { - EXPECT_EQ(ExtensionNameUtil::Status::Warn, + EXPECT_EQ(ExtensionNameUtil::Status::Block, ExtensionNameUtil::deprecatedExtensionNameStatus(nullptr)); } @@ -54,16 +54,11 @@ TEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestDeprecatedExtensionNameS // Test that deprecated names trigger an exception. TEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionNameThrows)) { - // Validate that no runtime available results in warnings. + // Validate that no runtime available results in exception. { - auto test = []() { - ExtensionNameUtil::checkDeprecatedExtensionName("XXX", "deprecated", "canonical", nullptr); - }; - - EXPECT_NO_THROW(test()); - - EXPECT_LOG_CONTAINS("warn", "Using deprecated XXX extension name 'deprecated' for 'canonical'.", - test()); + EXPECT_THROW_WITH_REGEX( + ExtensionNameUtil::checkDeprecatedExtensionName("XXX", "deprecated", "canonical", nullptr), + EnvoyException, "Using deprecated XXX extension name 'deprecated' for 'canonical'.*"); } // If deprecated feature is enabled, warn. @@ -101,16 +96,15 @@ TEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtension // Test that deprecated names are reported as allowed or not, with logging. TEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestAllowDeprecatedExtensionName)) { - // Validate that no runtime available results in warnings and allows deprecated names. + // Validate that no runtime available results in a log message and returns false. 
{ auto test = []() { return ExtensionNameUtil::allowDeprecatedExtensionName("XXX", "deprecated", "canonical", nullptr); }; - EXPECT_TRUE(test()); + EXPECT_FALSE(test()); - EXPECT_LOG_CONTAINS("warn", "Using deprecated XXX extension name 'deprecated' for 'canonical'.", - test()); + EXPECT_LOG_CONTAINS("error", "#using-runtime-overrides-for-deprecated-features", test()); } // If deprecated feature is enabled, log and return true. diff --git a/test/extensions/common/wasm/BUILD b/test/extensions/common/wasm/BUILD index 54e6b7a24fcd6..cca41d4cf939c 100644 --- a/test/extensions/common/wasm/BUILD +++ b/test/extensions/common/wasm/BUILD @@ -46,7 +46,6 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", - "//source/extensions/common/crypto:utility_lib", "//source/extensions/common/wasm:wasm_lib", "//test/extensions/common/wasm:wasm_runtime", "//test/extensions/common/wasm/test_data:test_context_cpp_plugin", diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc index 2e947bc33e814..4e358f3d8e5f6 100644 --- a/test/extensions/common/wasm/wasm_test.cc +++ b/test/extensions/common/wasm/wasm_test.cc @@ -569,13 +569,8 @@ TEST_P(WasmCommonTest, Foreign) { wasm->setCreateContextForTesting( nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { auto root_context = new TestContext(wasm, plugin); -#ifdef ZLIBNG_VERSION - EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("compress 2000 -> 22"))); - EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("uncompress 22 -> 2000"))); -#else EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("compress 2000 -> 23"))); EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("uncompress 23 -> 2000"))); -#endif return root_context; }); wasm->start(plugin); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc 
b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 8d776a73b061f..57a9fd6906753 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -205,6 +205,39 @@ TEST_F(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { client_->onSuccess(std::move(check_response), span_); } +// Test the client when a denied response with unknown HTTP status code (i.e. if +// DeniedResponse.status is not set by the auth server implementation). The response sent to client +// is set with the default HTTP status code for denied response (403 Forbidden). +TEST_F(ExtAuthzGrpcClientTest, AuthorizationDeniedWithEmptyDeniedResponseStatus) { + initialize(); + + const std::string expected_body{"test"}; + const auto expected_headers = + TestCommon::makeHeaderValueOption({{"foo", "bar", false}, {"foobar", "bar", true}}); + const auto expected_downstream_headers = TestCommon::makeHeaderValueOption({}); + auto check_response = TestCommon::makeCheckResponse( + Grpc::Status::WellKnownGrpcStatus::PermissionDenied, envoy::type::v3::Empty, expected_body, + expected_headers, expected_downstream_headers); + // When the check response gives unknown denied response HTTP status code, the filter sets the + // response HTTP status code with 403 Forbidden (default). 
+ auto authz_response = + TestCommon::makeAuthzResponse(CheckStatus::Denied, Http::Code::Forbidden, expected_body, + expected_headers, expected_downstream_headers); + + envoy::service::auth::v3::CheckRequest request; + expectCallSend(request); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + + Http::TestRequestHeaderMapImpl headers; + client_->onCreateInitialMetadata(headers); + EXPECT_EQ(nullptr, headers.RequestId()); + EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); + + client_->onSuccess(std::move(check_response), span_); +} + // Test the client when an unknown error occurs. TEST_F(ExtAuthzGrpcClientTest, UnknownError) { initialize(); @@ -282,6 +315,46 @@ TEST_F(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { client_->onSuccess(std::move(check_response), span_); } +// Test the client when an OK response is received with additional query string parameters. +TEST_F(ExtAuthzGrpcClientTest, AuthorizationOkWithQueryParameters) { + initialize(); + + auto check_response = std::make_unique(); + auto status = check_response->mutable_status(); + + status->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); + + const Http::Utility::QueryParamsVector query_parameters_to_set{{"add-me", "yes"}}; + for (const auto& [key, value] : query_parameters_to_set) { + auto* query_parameter = check_response->mutable_ok_response()->add_query_parameters_to_set(); + query_parameter->set_key(key); + query_parameter->set_value(value); + } + + const std::vector query_parameters_to_remove{"remove-me"}; + for (const auto& key : query_parameters_to_remove) { + check_response->mutable_ok_response()->add_query_parameters_to_remove(key); + } + + // This is the expected authz response. 
+ auto authz_response = Response{}; + authz_response.status = CheckStatus::OK; + authz_response.query_parameters_to_set = {{"add-me", "yes"}}; + authz_response.query_parameters_to_remove = {"remove-me"}; + + envoy::service::auth::v3::CheckRequest request; + expectCallSend(request); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + + Http::TestRequestHeaderMapImpl headers; + client_->onCreateInitialMetadata(headers); + + EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); + client_->onSuccess(std::move(check_response), span_); +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 96d0be2f7b171..27a9a70b45ad7 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -102,6 +102,52 @@ class ExtAuthzHttpClientTest : public testing::Test { return std::make_shared(proto_config, timeout, path_prefix); } + void dynamicMetadataTest(CheckStatus status, const std::string& http_status) { + const std::string yaml = R"EOF( + http_service: + server_uri: + uri: "ext_authz:9000" + cluster: "ext_authz" + timeout: 0.25s + authorization_response: + dynamic_metadata_from_headers: + patterns: + - prefix: "X-Metadata-" + ignore_case: true + failure_mode_allow: true + )EOF"; + + initialize(yaml); + envoy::service::auth::v3::CheckRequest request; + client_->check(request_callbacks_, request, parent_span_, stream_info_); + + ProtobufWkt::Struct expected_dynamic_metadata; + auto* metadata_fields = expected_dynamic_metadata.mutable_fields(); + (*metadata_fields)["x-metadata-header-0"] = ValueUtil::stringValue("zero"); + 
(*metadata_fields)["x-metadata-header-1"] = ValueUtil::stringValue("2"); + (*metadata_fields)["x-metadata-header-2"] = ValueUtil::stringValue("4"); + + // When we call onSuccess() at the bottom of the test we expect that all the + // dynamic metadata values that we set above to be present in the authz Response + // below. + Response authz_response; + authz_response.status = status; + authz_response.dynamic_metadata = expected_dynamic_metadata; + EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo( + AuthzResponseNoAttributes(authz_response)))); + + const HeaderValueOptionVector http_response_headers = TestCommon::makeHeaderValueOption({ + {":status", http_status, false}, + {"bar", "nope", false}, + {"x-metadata-header-0", "zero", false}, + {"x-metadata-header-1", "2", false}, + {"x-foo", "nah", false}, + {"x-metadata-header-2", "4", false}, + }); + Http::ResponseMessagePtr http_response = TestCommon::makeMessageResponse(http_response_headers); + client_->onSuccess(async_request_, std::move(http_response)); + } + Http::RequestMessagePtr sendRequest(absl::node_hash_map&& headers) { envoy::service::auth::v3::CheckRequest request{}; auto mutable_headers = @@ -303,7 +349,8 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { {{":status", "200", false}, {"x-downstream-ok", "1", false}, {"x-upstream-ok", "1", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::OK, Http::Code::OK, EMPTY_STRING, TestCommon::makeHeaderValueOption({}), - TestCommon::makeHeaderValueOption({{"x-downstream-ok", "1", false}})); + // By default, the value of envoy.config.core.v3.HeaderValueOption.append is true. 
+ TestCommon::makeHeaderValueOption({{"x-downstream-ok", "1", true}})); auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; auto mutable_headers = @@ -419,6 +466,16 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithHeadersToRemove) { client_->onSuccess(async_request_, std::move(http_response)); } +// Test the client when an OK response is received with dynamic metadata in that OK response. +TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithDynamicMetadata) { + dynamicMetadataTest(CheckStatus::OK, "200"); +} + +// Test the client when a denied response is received with dynamic metadata in the denied response. +TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithDynamicMetadata) { + dynamicMetadataTest(CheckStatus::Denied, "403"); +} + // Test the client when a denied response is received. TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "403", false}}); diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index 67429f891e2ae..cdb7867a401a0 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -82,8 +82,13 @@ Response TestCommon::makeAuthzResponse(CheckStatus status, Http::Code status_cod } if (!downstream_headers.empty()) { for (auto& header : downstream_headers) { - authz_response.response_headers_to_add.emplace_back( - Http::LowerCaseString(header.header().key()), header.header().value()); + if (header.append().value()) { + authz_response.response_headers_to_add.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } else { + authz_response.response_headers_to_set.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } } } return authz_response; @@ -125,6 +130,19 @@ bool 
TestCommon::compareVectorOfHeaderName(const std::vector(rhs.begin(), rhs.end()); } +bool TestCommon::compareVectorOfUnorderedStrings(const std::vector& lhs, + const std::vector& rhs) { + return std::set(lhs.begin(), lhs.end()) == + std::set(rhs.begin(), rhs.end()); +} + +// TODO(esmet): This belongs in a QueryParams class +bool TestCommon::compareQueryParamsVector(const Http::Utility::QueryParamsVector& lhs, + const Http::Utility::QueryParamsVector& rhs) { + return std::set>(lhs.begin(), lhs.end()) == + std::set>(rhs.begin(), rhs.end()); +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/test/extensions/filters/common/ext_authz/test_common.h b/test/extensions/filters/common/ext_authz/test_common.h index 5d1e72222713d..0b058b67d42d1 100644 --- a/test/extensions/filters/common/ext_authz/test_common.h +++ b/test/extensions/filters/common/ext_authz/test_common.h @@ -46,8 +46,12 @@ class TestCommon { static HeaderValueOptionVector makeHeaderValueOption(KeyValueOptionVector&& headers); static bool compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); + static bool compareQueryParamsVector(const Http::Utility::QueryParamsVector& lhs, + const Http::Utility::QueryParamsVector& rhs); static bool compareVectorOfHeaderName(const std::vector& lhs, const std::vector& rhs); + static bool compareVectorOfUnorderedStrings(const std::vector& lhs, + const std::vector& rhs); }; MATCHER_P(AuthzErrorResponse, status, "") { @@ -111,6 +115,18 @@ MATCHER_P(AuthzOkResponse, response, "") { return false; } + // Compare query_parameters_to_set. + if (!TestCommon::compareQueryParamsVector(response.query_parameters_to_set, + arg->query_parameters_to_set)) { + return false; + } + + // Compare query_parameters_to_remove. 
+ if (!TestCommon::compareVectorOfUnorderedStrings(response.query_parameters_to_remove, + arg->query_parameters_to_remove)) { + return false; + } + return TestCommon::compareVectorOfHeaderName(response.headers_to_remove, arg->headers_to_remove); } diff --git a/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc b/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc index c857bbe74a1ca..3965930cfb336 100644 --- a/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc +++ b/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc @@ -247,8 +247,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, CasEdgeCasesDescriptor) { synchronizer().enable(); // Start a thread and start the fill callback. This will wait pre-CAS. - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); synchronizer().waitOn("on_fill_timer_pre_cas"); std::thread t1([&] { EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); @@ -296,8 +296,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDescriptor2) { EXPECT_TRUE(rate_limiter_->requestAllowed(descriptor_)); EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); } // Verify token bucket functionality with a single token. 
@@ -311,8 +311,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDescriptor) { EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 1 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -321,14 +321,14 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDescriptor) { EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 1 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); // 1 -> 1 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -349,8 +349,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketMultipleTokensPerFillDescr EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 2 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); 
EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -358,8 +358,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketMultipleTokensPerFillDescr EXPECT_TRUE(rate_limiter_->requestAllowed(descriptor_)); // 1 -> 2 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -383,8 +383,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDifferentDescriptorDiffere EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 1 tokens for descriptor2_ - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(50), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(50), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 8e56a2023b4f0..24c727245f5dc 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -67,11 +67,13 @@ void onMetadata(NiceMock& info) { TEST(RoleBasedAccessControlEngineImpl, Disabled) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); - RBAC::RoleBasedAccessControlEngineImpl engine_allow(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine_allow( + rbac, ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine_allow, false, LogResult::Undecided); 
rbac.set_action(envoy::config::rbac::v3::RBAC::DENY); - RBAC::RoleBasedAccessControlEngineImpl engine_deny(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine_deny(rbac, + ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine_deny, true, LogResult::Undecided); } @@ -169,7 +171,8 @@ TEST(RoleBasedAccessControlEngineImpl, AllowedAllowlist) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); Envoy::Network::MockConnection conn; Envoy::Http::TestRequestHeaderMapImpl headers; @@ -192,7 +195,8 @@ TEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::DENY); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); Envoy::Network::MockConnection conn; Envoy::Http::TestRequestHeaderMapImpl headers; @@ -220,7 +224,8 @@ TEST(RoleBasedAccessControlEngineImpl, BasicCondition) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine, false, LogResult::Undecided); } @@ -241,12 +246,14 @@ TEST(RoleBasedAccessControlEngineImpl, MalformedCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine(rbac), EnvoyException, - "failed to create an expression: .*"); + EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl 
engine( + rbac, ProtobufMessage::getStrictValidationVisitor()), + EnvoyException, "failed to create an expression: .*"); rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); - EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine_log(rbac), EnvoyException, - "failed to create an expression: .*"); + EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine_log( + rbac, ProtobufMessage::getStrictValidationVisitor()), + EnvoyException, "failed to create an expression: .*"); } TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { @@ -262,7 +269,8 @@ TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine, false, LogResult::Undecided); } @@ -282,7 +290,8 @@ TEST(RoleBasedAccessControlEngineImpl, EvaluationFailure) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine, false, LogResult::Undecided); } @@ -307,7 +316,8 @@ TEST(RoleBasedAccessControlEngineImpl, ErrorCondition) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine, false, LogResult::Undecided, Envoy::Network::MockConnection()); } @@ -337,7 +347,8 @@ TEST(RoleBasedAccessControlEngineImpl, HeaderCondition) { envoy::config::rbac::v3::RBAC rbac; 
rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); Envoy::Http::TestRequestHeaderMapImpl headers; Envoy::Http::LowerCaseString key("foo"); @@ -378,7 +389,8 @@ TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; @@ -405,7 +417,8 @@ TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); Envoy::Network::MockConnection conn; Envoy::Http::TestRequestHeaderMapImpl headers; @@ -423,7 +436,8 @@ TEST(RoleBasedAccessControlEngineImpl, DisabledLog) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); checkEngine(engine, true, RBAC::LogResult::No, info); } @@ -435,7 +449,8 @@ TEST(RoleBasedAccessControlEngineImpl, LogIfMatched) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); (*rbac.mutable_policies())["foo"] = policy; - RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac, + ProtobufMessage::getStrictValidationVisitor()); 
Envoy::Network::MockConnection conn; Envoy::Http::TestRequestHeaderMapImpl headers; diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc index 09f9f75d08863..7d96e355d9e53 100644 --- a/test/extensions/filters/common/rbac/matchers_test.cc +++ b/test/extensions/filters/common/rbac/matchers_test.cc @@ -45,7 +45,7 @@ TEST(AndMatcher, Permission_Set) { envoy::config::rbac::v3::Permission* perm = set.add_rules(); perm->set_any(true); - checkMatcher(RBAC::AndMatcher(set), true); + checkMatcher(RBAC::AndMatcher(set, ProtobufMessage::getStrictValidationVisitor()), true); perm = set.add_rules(); perm->set_destination_port(123); @@ -57,12 +57,14 @@ TEST(AndMatcher, Permission_Set) { Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); info.downstream_connection_info_provider_->setLocalAddress(addr); - checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info); + checkMatcher(RBAC::AndMatcher(set, ProtobufMessage::getStrictValidationVisitor()), true, conn, + headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 8080, false); info.downstream_connection_info_provider_->setLocalAddress(addr); - checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info); + checkMatcher(RBAC::AndMatcher(set, ProtobufMessage::getStrictValidationVisitor()), false, conn, + headers, info); } TEST(AndMatcher, Principal_Set) { @@ -104,18 +106,21 @@ TEST(OrMatcher, Permission_Set) { Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); info.downstream_connection_info_provider_->setLocalAddress(addr); - checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info); + checkMatcher(RBAC::OrMatcher(set, ProtobufMessage::getStrictValidationVisitor()), false, conn, + headers, info); perm = set.add_rules(); perm->mutable_destination_port_range()->set_start(123); perm->mutable_destination_port_range()->set_end(456); - checkMatcher(RBAC::OrMatcher(set), false, conn, headers, 
info); + checkMatcher(RBAC::OrMatcher(set, ProtobufMessage::getStrictValidationVisitor()), false, conn, + headers, info); perm = set.add_rules(); perm->set_any(true); - checkMatcher(RBAC::OrMatcher(set), true, conn, headers, info); + checkMatcher(RBAC::OrMatcher(set, ProtobufMessage::getStrictValidationVisitor()), true, conn, + headers, info); } TEST(OrMatcher, Principal_Set) { @@ -144,7 +149,8 @@ TEST(NotMatcher, Permission) { envoy::config::rbac::v3::Permission perm; perm.set_any(true); - checkMatcher(RBAC::NotMatcher(perm), false, Envoy::Network::MockConnection()); + checkMatcher(RBAC::NotMatcher(perm, ProtobufMessage::getStrictValidationVisitor()), false, + Envoy::Network::MockConnection()); } TEST(NotMatcher, Principal) { @@ -419,7 +425,7 @@ TEST(PolicyMatcher, PolicyMatcher) { policy.add_principals()->mutable_authenticated()->mutable_principal_name()->set_exact("bar"); Expr::BuilderPtr builder = Expr::createBuilder(nullptr); - RBAC::PolicyMatcher matcher(policy, builder.get()); + RBAC::PolicyMatcher matcher(policy, builder.get(), ProtobufMessage::getStrictValidationVisitor()); Envoy::Network::MockConnection conn; Envoy::Http::TestRequestHeaderMapImpl headers; diff --git a/test/extensions/filters/common/rbac/mocks.h b/test/extensions/filters/common/rbac/mocks.h index 354a1e5dab153..99503af52f4e4 100644 --- a/test/extensions/filters/common/rbac/mocks.h +++ b/test/extensions/filters/common/rbac/mocks.h @@ -2,6 +2,7 @@ #include "envoy/config/rbac/v3/rbac.pb.h" +#include "source/common/protobuf/message_validator_impl.h" #include "source/extensions/filters/common/rbac/engine_impl.h" #include "gmock/gmock.h" @@ -16,7 +17,8 @@ class MockEngine : public RoleBasedAccessControlEngineImpl { public: MockEngine(const envoy::config::rbac::v3::RBAC& rules, const EnforcementMode mode = EnforcementMode::Enforced) - : RoleBasedAccessControlEngineImpl(rules, mode){}; + : RoleBasedAccessControlEngineImpl(rules, ProtobufMessage::getStrictValidationVisitor(), + mode){}; 
MOCK_METHOD(bool, handleAction, (const Envoy::Network::Connection&, const Envoy::Http::RequestHeaderMap&, diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index c8ddc35c8a907..e6dbc2a9b2b0b 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -25,7 +25,7 @@ envoy_extension_cc_test( "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -43,7 +43,7 @@ envoy_extension_cc_test( "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -53,7 +53,7 @@ envoy_extension_cc_test( extension_names = ["envoy.filters.http.admission_control"], deps = [ "//source/extensions/filters/http/admission_control:admission_control_filter_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -77,6 +77,6 @@ envoy_extension_cc_test( "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc 
index eaea6c0c5cbd4..62b99ed674501 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "envoy/grpc/status.h" #include "source/common/common/enum_to_int.h" diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc index e59dba7611f65..319fb8ffc9351 100644 --- a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -15,7 +15,7 @@ const std::string ADMISSION_CONTROL_CONFIG = R"EOF( name: envoy.filters.http.admission_control typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl success_criteria: http_criteria: grpc_criteria: diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index 80e1d9ce81ae5..6bbeb0e4192fe 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include 
"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/common/stats/isolated_store_impl.h" #include "source/extensions/filters/http/admission_control/admission_control.h" diff --git a/test/extensions/filters/http/admission_control/controller_test.cc b/test/extensions/filters/http/admission_control/controller_test.cc index 2b74282519031..5457f9fc65aaa 100644 --- a/test/extensions/filters/http/admission_control/controller_test.cc +++ b/test/extensions/filters/http/admission_control/controller_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/extensions/filters/http/admission_control/thread_local_controller.h" diff --git a/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc index 7e3725daed477..99c421710d7dc 100644 --- a/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc +++ b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include 
"envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/common/common/enum_to_int.h" #include "source/extensions/filters/http/admission_control/admission_control.h" diff --git a/test/extensions/filters/http/alternate_protocols_cache/BUILD b/test/extensions/filters/http/alternate_protocols_cache/BUILD index b04a9403c840d..e30f2549c64f1 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/BUILD +++ b/test/extensions/filters/http/alternate_protocols_cache/BUILD @@ -33,11 +33,14 @@ envoy_extension_cc_test( extension_names = ["envoy.filters.http.alternate_protocols_cache"], deps = [ "//source/extensions/filters/http/alternate_protocols_cache:config", + "//source/extensions/key_value/file_based:config_lib", "//test/integration:http_integration_lib", "//test/integration:http_protocol_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc index 0bfc84ac19fae..b09210bc37799 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc +++ b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc @@ -1,6 +1,8 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/common/key_value/v3/config.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include 
"envoy/extensions/key_value/file_based/v3/config.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "source/extensions/transport_sockets/tls/context_config_impl.h" @@ -18,18 +20,39 @@ namespace { class FilterIntegrationTest : public HttpProtocolIntegrationTest { protected: void initialize() override { - const std::string filter = R"EOF( + const std::string filename = TestEnvironment::temporaryPath("alt_svc_cache.txt"); + envoy::config::core::v3::AlternateProtocolsCacheOptions alt_cache; + alt_cache.set_name("default_alternate_protocols_cache"); + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig config; + config.set_filename(filename); + envoy::config::common::key_value::v3::KeyValueStoreConfig kv_config; + kv_config.mutable_config()->set_name("envoy.key_value.file_based"); + kv_config.mutable_config()->mutable_typed_config()->PackFrom(config); + alt_cache.mutable_key_value_store_config()->set_name("envoy.common.key_value"); + alt_cache.mutable_key_value_store_config()->mutable_typed_config()->PackFrom(kv_config); + + const std::string filter = fmt::format(R"EOF( name: alternate_protocols_cache typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.alternate_protocols_cache.v3.FilterConfig alternate_protocols_cache_options: name: default_alternate_protocols_cache -)EOF"; + key_value_store_config: + name: "envoy.common.key_value" + typed_config: + "@type": type.googleapis.com/envoy.config.common.key_value.v3.KeyValueStoreConfig + config: + name: envoy.key_value.file_based + typed_config: + "@type": type.googleapis.com/envoy.extensions.key_value.file_based.v3.FileBasedKeyValueStoreConfig + filename: {} + +)EOF", + filename); config_helper_.prependFilter(filter); upstream_tls_ = true; - config_helper_.configureUpstreamTls(/*use_alpn=*/true, /*http3=*/true, - /*use_alternate_protocols_cache=*/true); + config_helper_.configureUpstreamTls(/*use_alpn=*/true, /*http3=*/true, alt_cache); 
HttpProtocolIntegrationTest::initialize(); } @@ -81,6 +104,77 @@ INSTANTIATE_TEST_SUITE_P(Protocols, FilterIntegrationTest, {Http::CodecType::HTTP2}, {Http::CodecType::HTTP3})), HttpProtocolIntegrationTest::protocolTestParamsToString); +class MixedUpstreamIntegrationTest : public FilterIntegrationTest { +protected: + void writeFile() { + const std::string filename = TestEnvironment::temporaryPath("alt_svc_cache.txt"); + // There's no hostname here because we're not doing dynamic forward proxying so we infer the + // hostname from the config (which does not set it) + uint32_t port = fake_upstreams_[0]->localAddress()->ip()->port(); + std::string key = absl::StrCat("https://:", port); + + size_t seconds = std::chrono::duration_cast( + timeSystem().monotonicTime().time_since_epoch()) + .count(); + std::string value = absl::StrCat("h3=\":", port, "\"; ma=", 86400 + seconds); + TestEnvironment::writeStringToFileForTest( + "alt_svc_cache.txt", absl::StrCat(key.length(), "\n", key, value.length(), "\n", value)); + } + + void createUpstreams() override { + ASSERT_EQ(upstreamProtocol(), Http::CodecType::HTTP3); + ASSERT_EQ(fake_upstreams_count_, 1); + ASSERT_FALSE(autonomous_upstream_); + + if (use_http2_) { + auto config = configWithType(Http::CodecType::HTTP2); + Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); + addFakeUpstream(std::move(factory), Http::CodecType::HTTP2); + } else { + auto config = configWithType(Http::CodecType::HTTP3); + Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); + addFakeUpstream(std::move(factory), Http::CodecType::HTTP3); + writeFile(); + } + } + + bool use_http2_{false}; +}; + +TEST_P(MixedUpstreamIntegrationTest, BasicRequestAutoWithHttp3) { + testRouterRequestAndResponseWithBody(0, 0, false); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestsAutoWithHttp3) { + simultaneousRequest(1024, 512, 1023, 513); +} + +TEST_P(MixedUpstreamIntegrationTest, 
SimultaneousLargeRequestsAutoWithHttp3) { + config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. + simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); +} + +TEST_P(MixedUpstreamIntegrationTest, BasicRequestAutoWithHttp2) { + use_http2_ = true; + testRouterRequestAndResponseWithBody(0, 0, false); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestsAutoWithHttp2) { + use_http2_ = true; + simultaneousRequest(1024, 512, 1023, 513); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousLargeRequestsAutoWithHttp2) { + use_http2_ = true; + config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. + simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, MixedUpstreamIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP2}, {Http::CodecType::HTTP3})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + #endif } // namespace diff --git a/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc b/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc index 951978299620a..51e0c85161509 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc +++ b/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc @@ -32,15 +32,16 @@ class FilterTest : public testing::Test, public Event::TestUsingSimulatedTime { envoy::extensions::filters::http::alternate_protocols_cache::v3::FilterConfig proto_config; if (populate_config) { proto_config.mutable_alternate_protocols_cache_options()->set_name("foo"); - EXPECT_CALL(*alternate_protocols_cache_manager_, getCache(_)) + EXPECT_CALL(*alternate_protocols_cache_manager_, getCache(_, _)) .WillOnce(Return(alternate_protocols_cache_)); } filter_config_ = std::make_shared( proto_config, alternate_protocols_cache_manager_factory_, simTime()); - filter_ = 
std::make_unique(filter_config_); + filter_ = std::make_unique(filter_config_, dispatcher_); filter_->setEncoderFilterCallbacks(callbacks_); } + Event::MockDispatcher dispatcher_; Http::MockAlternateProtocolsCacheManagerFactory alternate_protocols_cache_manager_factory_; std::shared_ptr alternate_protocols_cache_manager_; std::shared_ptr alternate_protocols_cache_; diff --git a/test/extensions/filters/http/bandwidth_limit/BUILD b/test/extensions/filters/http/bandwidth_limit/BUILD index b4e4ac3a87c9a..c41fe5ca0fe99 100644 --- a/test/extensions/filters/http/bandwidth_limit/BUILD +++ b/test/extensions/filters/http/bandwidth_limit/BUILD @@ -23,7 +23,7 @@ envoy_extension_cc_test( "//source/common/runtime:runtime_lib", "//source/extensions/filters/http/bandwidth_limit:bandwidth_limit_lib", "//test/mocks/server:server_mocks", - "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/bandwidth_limit/config_test.cc b/test/extensions/filters/http/bandwidth_limit/config_test.cc index b98ea7ca4d67f..853a3ca8896ac 100644 --- a/test/extensions/filters/http/bandwidth_limit/config_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/config_test.cc @@ -11,8 +11,7 @@ namespace Extensions { namespace HttpFilters { namespace BandwidthLimitFilter { -using EnableMode = - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit_EnableMode; +using EnableMode = envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit_EnableMode; TEST(Factory, GlobalEmptyConfig) { const std::string yaml = R"( diff --git a/test/extensions/filters/http/bandwidth_limit/filter_test.cc b/test/extensions/filters/http/bandwidth_limit/filter_test.cc index daffe9076ea34..5d052318111cf 100644 --- a/test/extensions/filters/http/bandwidth_limit/filter_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/filter_test.cc @@ -1,4 
+1,4 @@ -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.h" #include "source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h" @@ -22,7 +22,7 @@ class FilterTest : public testing::Test { FilterTest() = default; void setup(const std::string& yaml) { - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit config; + envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit config; TestUtility::loadFromYaml(yaml, config); config_ = std::make_shared(config, stats_, runtime_, time_system_, true); filter_ = std::make_shared(config_); diff --git a/test/extensions/filters/http/buffer/config_test.cc b/test/extensions/filters/http/buffer/config_test.cc index 912210a0df4b6..e2cd5d071f440 100644 --- a/test/extensions/filters/http/buffer/config_test.cc +++ b/test/extensions/filters/http/buffer/config_test.cc @@ -100,11 +100,12 @@ TEST(BufferFilterFactoryTest, BufferFilterRouteSpecificConfig) { EXPECT_TRUE(inflated); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(BufferFilterFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.buffer"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index 725e66c71fb85..c8409772591a3 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -80,7 +80,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/cache/simple_http_cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/cache/simple_http_cache/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index 5f09027b73900..5b813b8a368b0 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -86,9 +86,9 @@ class CacheIntegrationTest : public Event::TestUsingSimulatedTime, const std::string default_config{R"EOF( name: "envoy.filters.http.cache" typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3.CacheConfig" typed_config: - "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig" + "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3.SimpleHttpCacheConfig" )EOF"}; DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; }; diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index 
3fecae678b985..16c2e17641a18 100644 --- a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -118,7 +118,7 @@ class CacheFilterTest : public ::testing::Test { void waitBeforeSecondRequest() { time_source_.advanceTimeWait(delay_); } SimpleHttpCache simple_cache_; - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config_; + envoy::extensions::filters::http::cache::v3::CacheConfig config_; NiceMock context_; Event::SimulatedTimeSystem time_source_; DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc index fe1fa7098bf60..35a9a7dd60d23 100644 --- a/test/extensions/filters/http/cache/cache_headers_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -802,9 +802,9 @@ TEST(CreateVaryIdentifier, DisallowedHeaderWithAllowedHeader) { absl::nullopt); } -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows {accept, accept-language, width} to be varied in the tests. 
- envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc index 18ad3ebba33e0..e5bbf9061b8de 100644 --- a/test/extensions/filters/http/cache/cacheability_utils_test.cc +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -25,9 +25,9 @@ class RequestConditionalHeadersTest : public testing::TestWithParam std::string conditionalHeader() const { return GetParam(); } }; -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows 'accept' to be varied in the tests. - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); return config; diff --git a/test/extensions/filters/http/cache/config_test.cc b/test/extensions/filters/http/cache/config_test.cc index 0991f842a2f7d..3583e95cb8f11 100644 --- a/test/extensions/filters/http/cache/config_test.cc +++ b/test/extensions/filters/http/cache/config_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/cache/simple_http_cache/v3alpha/config.pb.h" +#include "envoy/extensions/cache/simple_http_cache/v3/config.pb.h" #include "source/extensions/filters/http/cache/cache_filter.h" #include "source/extensions/filters/http/cache/config.h" @@ -16,7 +16,7 @@ namespace { class CacheFilterFactoryTest : public ::testing::Test { protected: - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config_; + envoy::extensions::filters::http::cache::v3::CacheConfig config_; NiceMock context_; CacheFilterFactory factory_; 
Http::MockFilterChainFactoryCallbacks filter_callback_; @@ -24,7 +24,7 @@ class CacheFilterFactoryTest : public ::testing::Test { TEST_F(CacheFilterFactoryTest, Basic) { config_.mutable_typed_config()->PackFrom( - envoy::extensions::cache::simple_http_cache::v3alpha::SimpleHttpCacheConfig()); + envoy::extensions::cache::simple_http_cache::v3::SimpleHttpCacheConfig()); Http::FilterFactoryCb cb = factory_.createFilterFactoryFromProto(config_, "stats", context_); Http::StreamFilterSharedPtr filter; EXPECT_CALL(filter_callback_, addStreamFilter(_)).WillOnce(::testing::SaveArg<0>(&filter)); @@ -39,7 +39,7 @@ TEST_F(CacheFilterFactoryTest, NoTypedConfig) { TEST_F(CacheFilterFactoryTest, UnregisteredTypedConfig) { config_.mutable_typed_config()->PackFrom( - envoy::extensions::filters::http::cache::v3alpha::CacheConfig()); + envoy::extensions::filters::http::cache::v3::CacheConfig()); EXPECT_THROW(factory_.createFilterFactoryFromProto(config_, "stats", context_), EnvoyException); } diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 5cf24b047d122..f9887661e1b63 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -30,9 +30,9 @@ struct LookupRequestTestCase { using Seconds = std::chrono::seconds; -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows 'accept' to be varied in the tests. 
- envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); return config; diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index 76e2d1dc2e538..f2eb5ac181af0 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -19,9 +19,9 @@ namespace { const std::string EpochDate = "Thu, 01 Jan 1970 00:00:00 GMT"; -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows 'accept' to be varied in the tests. - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); return config; @@ -37,16 +37,17 @@ class SimpleHttpCacheTest : public testing::Test { } // Updates the cache entry's header - void updateHeaders(LookupContextPtr lookup, - const Http::TestResponseHeaderMapImpl& response_headers, + void updateHeaders(LookupContext& lookup, const Http::TestResponseHeaderMapImpl& response_headers, const ResponseMetadata& metadata) { - cache_.updateHeaders(*lookup, response_headers, metadata); + cache_.updateHeaders(lookup, response_headers, metadata); } void updateHeaders(absl::string_view request_path, const Http::TestResponseHeaderMapImpl& response_headers, const ResponseMetadata& metadata) { - updateHeaders(lookup(request_path), response_headers, metadata); + LookupRequest request = makeLookupRequest(request_path); + LookupContextPtr context = 
cache_.makeLookupContext(std::move(request)); + updateHeaders(*context, response_headers, metadata); } // Performs a cache lookup. @@ -61,7 +62,7 @@ class SimpleHttpCacheTest : public testing::Test { void insert(LookupContextPtr lookup, const Http::TestResponseHeaderMapImpl& response_headers, const absl::string_view response_body) { InsertContextPtr inserter = cache_.makeInsertContext(move(lookup)); - const ResponseMetadata metadata = {current_time_}; + const ResponseMetadata metadata = {time_source_.systemTime()}; inserter->insertHeaders(response_headers, metadata, false); inserter->insertBody(Buffer::OwnedImpl(response_body), nullptr, true); } @@ -96,7 +97,7 @@ class SimpleHttpCacheTest : public testing::Test { LookupRequest makeLookupRequest(absl::string_view request_path) { request_headers_.setPath(request_path); - return LookupRequest(request_headers_, current_time_, vary_allow_list_); + return LookupRequest(request_headers_, time_source_.systemTime(), vary_allow_list_); } AssertionResult expectLookupSuccessWithBody(LookupContext* lookup_context, @@ -145,7 +146,6 @@ class SimpleHttpCacheTest : public testing::Test { LookupResult lookup_result_; Http::TestRequestHeaderMapImpl request_headers_; Event::SimulatedTimeSystem time_source_; - SystemTime current_time_ = time_source_.systemTime(); DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; VaryAllowList vary_allow_list_; }; @@ -156,8 +156,9 @@ TEST_F(SimpleHttpCacheTest, PutGet) { LookupContextPtr name_lookup_context = lookup(request_path_1); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"cache-control", "public,max-age=3600"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"cache-control", "public,max-age=3600"}}; const std::string Body1("Value"); insert(move(name_lookup_context), response_headers, Body1); @@ 
-174,9 +175,10 @@ TEST_F(SimpleHttpCacheTest, PutGet) { } TEST_F(SimpleHttpCacheTest, PrivateResponse) { - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"age", "2"}, - {"cache-control", "private,max-age=3600"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"age", "2"}, + {"cache-control", "private,max-age=3600"}}; const std::string request_path("/name"); LookupContextPtr name_lookup_context = lookup(request_path); @@ -196,8 +198,9 @@ TEST_F(SimpleHttpCacheTest, Miss) { } TEST_F(SimpleHttpCacheTest, Fresh) { + const std::string time_value_1 = formatter_.fromTime(time_source_.systemTime()); const Http::TestResponseHeaderMapImpl response_headers = { - {"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}; + {"date", time_value_1}, {"cache-control", "public, max-age=3600"}}; // TODO(toddmgreer): Test with various date headers. insert("/", response_headers, ""); time_source_.advanceTimeWait(Seconds(3600)); @@ -206,13 +209,15 @@ TEST_F(SimpleHttpCacheTest, Fresh) { } TEST_F(SimpleHttpCacheTest, Stale) { + const std::string time_value_1 = formatter_.fromTime(time_source_.systemTime()); const Http::TestResponseHeaderMapImpl response_headers = { - {"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}; + {"date", time_value_1}, {"cache-control", "public, max-age=3600"}}; // TODO(toddmgreer): Test with various date headers. 
insert("/", response_headers, ""); time_source_.advanceTimeWait(Seconds(3601)); lookup("/"); - EXPECT_EQ(CacheEntryStatus::Ok, lookup_result_.cache_entry_status_); + + EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_result_.cache_entry_status_); } TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { @@ -221,9 +226,10 @@ TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"age", "6000"}, - {"cache-control", "public, max-age=9000"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"age", "6000"}, + {"cache-control", "public, max-age=9000"}}; const std::string Body("Value"); insert(move(name_lookup_context), response_headers, Body); EXPECT_TRUE(expectLookupSuccessWithBody(lookup(request_path).get(), Body)); @@ -236,9 +242,10 @@ TEST_F(SimpleHttpCacheTest, ResponseStaleWithRequestLargeMaxStale) { LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"age", "7200"}, - {"cache-control", "public, max-age=3600"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"age", "7200"}, + {"cache-control", "public, max-age=3600"}}; const std::string Body("Value"); insert(move(name_lookup_context), response_headers, Body); @@ -246,11 +253,12 @@ TEST_F(SimpleHttpCacheTest, ResponseStaleWithRequestLargeMaxStale) { } TEST_F(SimpleHttpCacheTest, StreamingPut) { - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"age", "2"}, - {"cache-control", "public, max-age=3600"}}; + 
Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"age", "2"}, + {"cache-control", "public, max-age=3600"}}; InsertContextPtr inserter = cache_.makeInsertContext(lookup("request_path")); - const ResponseMetadata metadata = {current_time_}; + const ResponseMetadata metadata = {time_source_.systemTime()}; inserter->insertHeaders(response_headers, metadata, false); inserter->insertBody( Buffer::OwnedImpl("Hello, "), [](bool ready) { EXPECT_TRUE(ready); }, false); @@ -264,9 +272,9 @@ TEST_F(SimpleHttpCacheTest, StreamingPut) { TEST(Registration, GetFactory) { HttpCacheFactory* factory = Registry::FactoryRegistry::getFactoryByType( - "envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig"); + "envoy.extensions.cache.simple_http_cache.v3.SimpleHttpCacheConfig"); ASSERT_NE(factory, nullptr); - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; config.mutable_typed_config()->PackFrom(*factory->createEmptyConfigProto()); EXPECT_EQ(factory->getCache(config).cacheInfo().name_, "envoy.extensions.http.cache.simple"); } @@ -274,9 +282,10 @@ TEST(Registration, GetFactory) { TEST_F(SimpleHttpCacheTest, VaryResponses) { // Responses will vary on accept. const std::string RequestPath("some-resource"); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"cache-control", "public,max-age=3600"}, - {"vary", "accept"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"cache-control", "public,max-age=3600"}, + {"vary", "accept"}}; // First request. request_headers_.setCopy(Http::LowerCaseString("accept"), "image/*"); @@ -314,9 +323,10 @@ TEST_F(SimpleHttpCacheTest, VaryResponses) { TEST_F(SimpleHttpCacheTest, VaryOnDisallowedKey) { // Responses will vary on accept. 
const std::string RequestPath("some-resource"); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"cache-control", "public,max-age=3600"}, - {"vary", "user-agent"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"cache-control", "public,max-age=3600"}, + {"vary", "user-agent"}}; // First request. request_headers_.setCopy(Http::LowerCaseString("user-agent"), "user_agent_one"); @@ -330,31 +340,35 @@ TEST_F(SimpleHttpCacheTest, VaryOnDisallowedKey) { TEST_F(SimpleHttpCacheTest, UpdateHeadersAndMetadata) { const std::string request_path_1("/name"); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, + const std::string time_value_1 = formatter_.fromTime(time_source_.systemTime()); + Http::TestResponseHeaderMapImpl response_headers{{"date", time_value_1}, {"cache-control", "public,max-age=3600"}}; insert(request_path_1, response_headers, "body"); EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers)); // Update the date field in the headers time_source_.advanceTimeWait(Seconds(3601)); - - response_headers = Http::TestResponseHeaderMapImpl{{"date", formatter_.fromTime(current_time_)}, + const SystemTime time_2 = time_source_.systemTime(); + const std::string time_value_2 = formatter_.fromTime(time_2); + response_headers = Http::TestResponseHeaderMapImpl{{"date", time_value_2}, {"cache-control", "public,max-age=3600"}}; - updateHeaders(request_path_1, response_headers, {current_time_}); + updateHeaders(request_path_1, response_headers, {time_2}); EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers)); } TEST_F(SimpleHttpCacheTest, UpdateHeadersForMissingKey) { const std::string request_path_1("/name"); - Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, - {"cache-control", 
"public,max-age=3600"}}; - updateHeaders(request_path_1, response_headers, {current_time_}); + Http::TestResponseHeaderMapImpl response_headers{ + {"date", formatter_.fromTime(time_source_.systemTime())}, + {"cache-control", "public,max-age=3600"}}; + updateHeaders(request_path_1, response_headers, {time_source_.systemTime()}); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); } TEST_F(SimpleHttpCacheTest, UpdateHeadersDisabledForVaryHeaders) { const std::string request_path_1("/name"); - Http::TestResponseHeaderMapImpl response_headers_1{{"date", formatter_.fromTime(current_time_)}, + const std::string time_value_1 = formatter_.fromTime(time_source_.systemTime()); + Http::TestResponseHeaderMapImpl response_headers_1{{"date", time_value_1}, {"cache-control", "public,max-age=3600"}, {"accept", "image/*"}, {"vary", "accept"}}; @@ -362,14 +376,112 @@ TEST_F(SimpleHttpCacheTest, UpdateHeadersDisabledForVaryHeaders) { EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_1)); // Update the date field in the headers - time_source_.advanceTimeWait(Seconds(3601)); - Http::TestResponseHeaderMapImpl response_headers_2{{"date", formatter_.fromTime(current_time_)}, + time_source_.advanceTimeWait(Seconds(3600)); + const SystemTime time_2 = time_source_.systemTime(); + const std::string time_value_2 = formatter_.fromTime(time_2); + Http::TestResponseHeaderMapImpl response_headers_2{{"date", time_value_2}, {"cache-control", "public,max-age=3600"}, {"accept", "image/*"}, {"vary", "accept"}}; - updateHeaders(request_path_1, response_headers_2, {current_time_}); + updateHeaders(request_path_1, response_headers_2, {time_2}); + + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_1)); +} + +TEST_F(SimpleHttpCacheTest, UpdateHeadersSkipEtagHeader) { + const std::string request_path_1("/name"); + const std::string time_value_1 = formatter_.fromTime(time_source_.systemTime()); + 
Http::TestResponseHeaderMapImpl response_headers_1{ + {"date", time_value_1}, {"cache-control", "public,max-age=3600"}, {"etag", "0000-0000"}}; + insert(request_path_1, response_headers_1, "body"); + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_1)); + + // Update the date field in the headers + time_source_.advanceTimeWait(Seconds(3601)); + const SystemTime time_2 = time_source_.systemTime(); + const std::string time_value_2 = formatter_.fromTime(time_2); + Http::TestResponseHeaderMapImpl response_headers_2{ + {"date", time_value_2}, {"cache-control", "public,max-age=3600"}, {"etag", "1111-1111"}}; + // The etag header should not be updated + Http::TestResponseHeaderMapImpl response_headers_3{ + {"date", time_value_2}, {"cache-control", "public,max-age=3600"}, {"etag", "0000-0000"}}; + + updateHeaders(request_path_1, response_headers_2, {time_2}); + + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_3)); +} + +TEST_F(SimpleHttpCacheTest, UpdateHeadersSkipSpecificHeaders) { + const std::string request_path_1("/name"); + const std::string time_value_1 = formatter_.fromTime(time_source_.systemTime()); + + // Vary not tested because we have separate tests that cover it + Http::TestResponseHeaderMapImpl origin_response_headers{ + {"date", time_value_1}, + {"cache-control", "public,max-age=3600"}, + {"content-range", "bytes 200-1000/67589"}, + {"content-length", "800"}, + {"etag", "0000-0000"}, + {"etag", "1111-1111"}, + {"link", "; rel=\"preconnect\""}}; + insert(request_path_1, origin_response_headers, "body"); + EXPECT_TRUE( + expectLookupSuccessWithHeaders(lookup(request_path_1).get(), origin_response_headers)); + time_source_.advanceTimeWait(Seconds(100)); + + const SystemTime time_2 = time_source_.systemTime(); + const std::string time_value_2 = formatter_.fromTime(time_2); + Http::TestResponseHeaderMapImpl incoming_response_headers{ + {"date", time_value_2}, + {"cache-control", 
"public,max-age=3600"}, + {"content-range", "bytes 5-1000/67589"}, + {"content-length", "995"}, + {"content-length", "996"}, + {"age", "20"}, + {"etag", "2222-2222"}, + {"link", "; rel=\"preconnect\""}}; + + // The skipped headers should not be updated + // "age" and "link" should be updated + Http::TestResponseHeaderMapImpl expected_response_headers{ + {"date", time_value_2}, + {"cache-control", "public,max-age=3600"}, + {"content-range", "bytes 200-1000/67589"}, + {"content-length", "800"}, + {"age", "20"}, + {"etag", "0000-0000"}, + {"etag", "1111-1111"}, + {"link", "; rel=\"preconnect\""}}; + + updateHeaders(request_path_1, incoming_response_headers, {time_2}); + + EXPECT_TRUE( + expectLookupSuccessWithHeaders(lookup(request_path_1).get(), expected_response_headers)); +} + +TEST_F(SimpleHttpCacheTest, UpdateHeadersWithMultivalue) { + const std::string request_path_1("/name"); + const SystemTime time_1 = time_source_.systemTime(); + const std::string time_value_1(formatter_.fromTime(time_1)); + // Vary not tested because we have separate tests that cover it + Http::TestResponseHeaderMapImpl response_headers_1{ + {"date", time_value_1}, + {"cache-control", "public,max-age=3600"}, + {"link", "; rel=\"preconnect\""}, + {"link", "; rel=\"preconnect\""}}; + insert(request_path_1, response_headers_1, "body"); EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_1)); + + Http::TestResponseHeaderMapImpl response_headers_2{ + {"date", time_value_1}, + {"cache-control", "public,max-age=3600"}, + {"link", "; rel=\"preconnect\""}, + {"link", "; rel=\"preconnect\""}}; + + updateHeaders(request_path_1, response_headers_2, {time_1}); + + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_2)); } } // namespace diff --git a/test/extensions/filters/http/cdn_loop/BUILD b/test/extensions/filters/http/cdn_loop/BUILD index e08b4eb331723..747f31fcabb7b 100644 --- a/test/extensions/filters/http/cdn_loop/BUILD 
+++ b/test/extensions/filters/http/cdn_loop/BUILD @@ -22,7 +22,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3:pkg_cc_proto", ], ) @@ -34,7 +34,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/cdn_loop:config", "//test/integration:http_protocol_integration_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/cdn_loop/config_test.cc b/test/extensions/filters/http/cdn_loop/config_test.cc index ffcc776c9004b..5e9b89d3c636e 100644 --- a/test/extensions/filters/http/cdn_loop/config_test.cc +++ b/test/extensions/filters/http/cdn_loop/config_test.cc @@ -1,6 +1,6 @@ #include -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" #include "source/extensions/filters/http/cdn_loop/config.h" #include "source/extensions/filters/http/cdn_loop/filter.h" @@ -23,7 +23,7 @@ TEST(CdnLoopFilterFactoryTest, ValidValuesWork) { Http::MockFilterChainFactoryCallbacks filter_callbacks; EXPECT_CALL(filter_callbacks, addStreamDecoderFilter(_)).WillOnce(::testing::SaveArg<0>(&filter)); - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("cdn"); CdnLoopFilterFactory factory; @@ -36,7 +36,7 @@ TEST(CdnLoopFilterFactoryTest, ValidValuesWork) { TEST(CdnLoopFilterFactoryTest, BlankCdnIdThrows) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; CdnLoopFilterFactory 
factory; EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), @@ -46,7 +46,7 @@ TEST(CdnLoopFilterFactoryTest, BlankCdnIdThrows) { TEST(CdnLoopFilterFactoryTest, InvalidCdnId) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("[not-token-or-ip"); CdnLoopFilterFactory factory; @@ -57,7 +57,7 @@ TEST(CdnLoopFilterFactoryTest, InvalidCdnId) { TEST(CdnLoopFilterFactoryTest, InvalidCdnIdNonHeaderWhitespace) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("\r\n"); CdnLoopFilterFactory factory; @@ -68,7 +68,7 @@ TEST(CdnLoopFilterFactoryTest, InvalidCdnIdNonHeaderWhitespace) { TEST(CdnLoopFilterFactoryTest, InvalidParsedCdnIdNotInput) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("cdn,cdn"); CdnLoopFilterFactory factory; diff --git a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc index 1403d84aa5adc..b6858b0961575 100644 --- a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc +++ b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc @@ -1,6 +1,6 @@ #include -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" #include "test/integration/http_protocol_integration.h" #include "test/test_common/utility.h" @@ -16,14 +16,14 @@ namespace { const std::string MaxDefaultConfig = R"EOF( name: envoy.filters.http.cdn_loop typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig + "@type": 
type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3.CdnLoopConfig cdn_id: cdn )EOF"; const std::string MaxOf2Config = R"EOF( name: envoy.filters.http.cdn_loop typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig + "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3.CdnLoopConfig cdn_id: cdn max_allowed_occurrences: 2 )EOF"; diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 index 60ffb84c5ac3c..48300675a1c17 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.oauth" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2" + type_url: "type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2" value: "\n\306\t\022\006\022\001(\032\001r\032<\n\035envoy.filters.\360\222\213\217Qgrpc_stats\022\r\022\013\022\002\010\006\"\005\010\200\200\200\001\032\014\022\n\n\001t\"\005\010\200\200\200\001\"\006\022\001(\032\001r*\005\n\003:\001=2\351\010\n\346\010*\343\010\n\010\n\006\010\200\200\200\200\004\022\326\010^^^^^j!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: 
\"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matche!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\r/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\"\n }\n}\nB\003\n\001A" } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 index 5bb334c905025..2122b9078716f 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.admission_control" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl" value: "\022\000\032\000*\003\022\001$" } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 
b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 index c91cc6a64987e..8f8c418d77a18 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.admission_control" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl" value: "\022\000\032\002\020\002*\016\n\t\t+\000\000\000\000\000\000\000\022\001$" } } @@ -20,4 +20,4 @@ upstream_data { data: "=" data: "?" } -} \ No newline at end of file +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 index 1d3dd81ed0ecb..e5d44679530ec 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.admission_control" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl" value: "\022\000" } } diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 3c9c0958438d2..30c0870e06aa4 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ 
b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -1,5 +1,7 @@ #pragma once +#include "source/common/stats/custom_stat_namespaces_impl.h" + #include "test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h" #include "test/fuzz/utility.h" #include "test/mocks/buffer/mocks.h" @@ -45,6 +47,7 @@ class UberFilterFuzzer : public HttpFilterFuzzer { envoy::config::core::v3::Metadata listener_metadata_; NiceMock stream_info_; TestScopedRuntime scoped_runtime_; + Stats::CustomStatNamespacesImpl custom_stat_namespaces_; // Filter constructed from the config. Http::StreamDecoderFilterSharedPtr decoder_filter_; diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 366bcacb5b245..5a0265132bb28 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -136,6 +136,8 @@ void UberFilterFuzzer::perFilterSetup() { // Prepare expectations for WASM filter. 
ON_CALL(factory_context_, listenerMetadata()) .WillByDefault(testing::ReturnRef(listener_metadata_)); + ON_CALL(factory_context_.api_, customStatNamespaces()) + .WillByDefault(testing::ReturnRef(custom_stat_namespaces_)); } } // namespace HttpFilters diff --git a/test/extensions/filters/http/composite/filter_test.cc b/test/extensions/filters/http/composite/filter_test.cc index f6591d75f9646..93f65c5bd3974 100644 --- a/test/extensions/filters/http/composite/filter_test.cc +++ b/test/extensions/filters/http/composite/filter_test.cc @@ -17,7 +17,7 @@ namespace { class FilterTest : public ::testing::Test { public: - FilterTest() : filter_(stats_) { + FilterTest() : filter_(stats_, decoder_callbacks_.dispatcher()) { filter_.setDecoderFilterCallbacks(decoder_callbacks_); filter_.setEncoderFilterCallbacks(encoder_callbacks_); } @@ -72,7 +72,7 @@ class FilterTest : public ::testing::Test { filter_.encodeComplete(); } - Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; + testing::NiceMock decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; Stats::MockCounter error_counter_; Stats::MockCounter success_counter_; diff --git a/test/extensions/filters/http/cors/cors_filter_test.cc b/test/extensions/filters/http/cors/cors_filter_test.cc index 8f7da8e214464..31c7f211cd25a 100644 --- a/test/extensions/filters/http/cors/cors_filter_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_test.cc @@ -705,11 +705,12 @@ TEST_F(CorsFilterTest, OptionsRequestNotMatchingOriginByRegex) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_)); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(CorsFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.cors"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/csrf/csrf_filter_test.cc b/test/extensions/filters/http/csrf/csrf_filter_test.cc index 489ee5b205765..e9be66e116183 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_test.cc @@ -450,11 +450,12 @@ TEST_F(CsrfFilterTest, RequestFromInvalidAdditionalRegexOrigin) { EXPECT_EQ(0U, config_->stats().request_valid_.value()); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(CsrfFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.csrf"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index a621e3c1ad15f..570fc3061359c 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -18,7 +18,8 @@ class ProxyFilterIntegrationTest : public testing::TestWithParammutable_validate_clusters()->set_value(false); }); + [override_auto_sni_header]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); + }); // Setup the initial CDS 
cluster. cluster_.mutable_connect_timeout()->CopyFrom( @@ -64,6 +68,10 @@ name: dynamic_forward_proxy ConfigHelper::HttpProtocolOptions protocol_options; protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); + if (!override_auto_sni_header.empty()) { + protocol_options.mutable_upstream_http_protocol_options()->set_override_auto_sni_header( + override_auto_sni_header); + } protocol_options.mutable_upstream_http_protocol_options()->set_auto_san_validation(true); protocol_options.mutable_explicit_http_config()->mutable_http_protocol_options(); ConfigHelper::setProtocolOptions(cluster_, protocol_options); @@ -122,7 +130,9 @@ name: envoy.clusters.dynamic_forward_proxy if (write_cache_file_) { std::string host = fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); - std::string value = fake_upstreams_[0]->localAddress()->asString(); + std::string value = + absl::StrCat(Network::Test::getLoopbackAddressUrlString(version_), ":", + fake_upstreams_[0]->localAddress()->ip()->port(), "|1000000|0"); TestEnvironment::writeStringToFileForTest( "dns_cache.txt", absl::StrCat(host.length(), "\n", host, value.length(), "\n", value)); } @@ -164,6 +174,21 @@ TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } +// Currently if the first DNS resolution fails, the filter will continue with +// a null address. Make sure this mode fails gracefully. 
+TEST_P(ProxyFilterIntegrationTest, RequestWithUnknownDomain) { + initializeWithArgs(); + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "doesnotexist.example.com"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + // Verify that after we populate the cache and reload the cluster we reattach to the cache with // its existing hosts. TEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) { @@ -279,6 +304,34 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTls) { checkSimpleRequestSuccess(0, 0, response.get()); } +// Verify that `override_auto_sni_header` can be used along with auto_sni to set +// SNI from an arbitrary header. +TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithAltHeaderSni) { + upstream_tls_ = true; + initializeWithArgs(1024, 1024, "x-host"); + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("{}:{}", fake_upstreams_[0]->localAddress()->ip()->addressAsString().c_str(), + fake_upstreams_[0]->localAddress()->ip()->port())}, + {"x-host", "localhost"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket = + dynamic_cast( + fake_upstream_connection_->connection().ssl().get()); + EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); + + upstream_request_->encodeHeaders(default_response_headers_, true); + ASSERT_TRUE(response->waitForEndStream()); + checkSimpleRequestSuccess(0, 0, response.get()); +} + 
TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) { upstream_tls_ = true; initializeWithArgs(); @@ -344,8 +397,6 @@ TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } -#ifndef WIN32 -// TODO(alyssawilk) figure out why this test doesn't pass on windows. TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { write_cache_file_ = true; @@ -359,10 +410,8 @@ TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024); checkSimpleRequestSuccess(1024, 1024, response.get()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.cache_load")->value()); - EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } -#endif } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc index 72a611f703a2a..c2150e500dd91 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc @@ -1,6 +1,7 @@ #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" +#include "source/common/stream_info/upstream_address.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h" #include "source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h" @@ -9,7 +10,6 @@ #include "test/mocks/upstream/basic_resource_limit.h" #include "test/mocks/upstream/cluster_manager.h" #include "test/mocks/upstream/transport_socket_match.h" -#include "test/test_common/test_runtime.h" using testing::AtLeast; using testing::Eq; @@ -31,19 +31,31 @@ using MockLoadDnsCacheEntryResult = class ProxyFilterTest : public 
testing::Test, public Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory { public: - ProxyFilterTest() { + void SetUp() override { + setupSocketMatcher(); + setupFilter(); + setupCluster(); + } + + void setupSocketMatcher() { cm_.initializeThreadLocalClusters({"fake_cluster"}); transport_socket_match_ = new NiceMock( Network::TransportSocketFactoryPtr(transport_socket_factory_)); cm_.thread_local_cluster_.cluster_.info_->transport_socket_matcher_.reset( transport_socket_match_); + } - envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig proto_config; + virtual void setupFilter() { EXPECT_CALL(*dns_cache_manager_, getCache(_)); + + envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig proto_config; filter_config_ = std::make_shared(proto_config, *this, cm_); filter_ = std::make_unique(filter_config_); + filter_->setDecoderFilterCallbacks(callbacks_); + } + void setupCluster() { // Allow for an otherwise strict mock. EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0)); EXPECT_CALL(callbacks_, streamId()).Times(AtLeast(0)); @@ -330,6 +342,175 @@ TEST_F(ProxyFilterTest, HostRewriteViaHeader) { filter_->onDestroy(); } +class UpstreamResolvedHostFilterStateHelper : public ProxyFilterTest { +public: + void setupFilter() override { + EXPECT_CALL(*dns_cache_manager_, getCache(_)); + + envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig proto_config; + proto_config.set_save_upstream_address(true); + + filter_config_ = std::make_shared(proto_config, *this, cm_); + filter_ = std::make_unique(filter_config_); + + filter_->setDecoderFilterCallbacks(callbacks_); + } +}; + +// Tests if address set is populated in the filter state when an upstream host is resolved +// successfully. 
+TEST_F(UpstreamResolvedHostFilterStateHelper, AddResolvedHostFilterStateMetadata) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); + + EXPECT_CALL(callbacks_, streamInfo()); + auto& filter_state = callbacks_.streamInfo().filterState(); + + InSequence s; + + // Setup test host + auto host_info = std::make_shared(); + host_info->address_ = Network::Utility::parseInternetAddress("1.2.3.4", 80); + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, getThreadLocalCluster(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_()) + .WillOnce(Return(circuit_breakers_)); + EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); + + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 80, _)) + .WillOnce(Invoke([&](absl::string_view, uint16_t, ProxyFilter::LoadDnsCacheEntryCallbacks&) { + return MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::InCache, nullptr, host_info}; + })); + + EXPECT_CALL(*dns_cache_manager_->dns_cache_, getHost(_)) + .WillOnce( + Invoke([&](absl::string_view) + -> absl::optional { + return host_info; + })); + + EXPECT_CALL(*host_info, address()); + + EXPECT_CALL(callbacks_, streamInfo()); + + // Host was resolved successfully, so continue filter iteration. + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + + // We expect FilterState to be populated + EXPECT_TRUE( + filter_state->hasData(StreamInfo::UpstreamAddress::key())); + + filter_->onDestroy(); +} + +// Tests if an already existing address set in filter state is updated when upstream host is +// resolved successfully. 
+TEST_F(UpstreamResolvedHostFilterStateHelper, UpdateResolvedHostFilterStateMetadata) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); + + EXPECT_CALL(callbacks_, streamInfo()); + + // Pre-populate the filter state with an address. + auto& filter_state = callbacks_.streamInfo().filterState(); + const auto pre_address = Network::Utility::parseInternetAddress("1.2.3.3", 80); + auto address_obj = std::make_unique(); + address_obj->address_ = pre_address; + filter_state->setData(StreamInfo::UpstreamAddress::key(), std::move(address_obj), + StreamInfo::FilterState::StateType::Mutable, + StreamInfo::FilterState::LifeSpan::Request); + + InSequence s; + + // Setup test host + auto host_info = std::make_shared(); + host_info->address_ = Network::Utility::parseInternetAddress("1.2.3.4", 80); + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, getThreadLocalCluster(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_()) + .WillOnce(Return(circuit_breakers_)); + EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); + + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 80, _)) + .WillOnce(Invoke([&](absl::string_view, uint16_t, ProxyFilter::LoadDnsCacheEntryCallbacks&) { + return MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::InCache, nullptr, host_info}; + })); + + EXPECT_CALL(*dns_cache_manager_->dns_cache_, getHost(_)) + .WillOnce( + Invoke([&](absl::string_view) + -> absl::optional { + return host_info; + })); + + EXPECT_CALL(*host_info, address()); + + EXPECT_CALL(callbacks_, streamInfo()); + + // Host was resolved successfully, so continue filter iteration. 
+ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + + // We expect FilterState to be populated + EXPECT_TRUE( + filter_state->hasData(StreamInfo::UpstreamAddress::key())); + + const StreamInfo::UpstreamAddress& updated_address_obj = + filter_state->getDataReadOnly( + StreamInfo::UpstreamAddress::key()); + + // Verify the data + EXPECT_TRUE(updated_address_obj.address_); + EXPECT_EQ(updated_address_obj.address_->asStringView(), host_info->address_->asStringView()); + + filter_->onDestroy(); +} + +// Tests if address set is populated in the filter state when an upstream host is resolved +// successfully but is null. +TEST_F(UpstreamResolvedHostFilterStateHelper, IgnoreFilterStateMetadataNullAddress) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); + + EXPECT_CALL(callbacks_, streamInfo()); + auto& filter_state = callbacks_.streamInfo().filterState(); + + InSequence s; + + // Setup test host + auto host_info = std::make_shared(); + host_info->address_ = nullptr; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, getThreadLocalCluster(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_()) + .WillOnce(Return(circuit_breakers_)); + EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); + + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 80, _)) + .WillOnce(Invoke([&](absl::string_view, uint16_t, ProxyFilter::LoadDnsCacheEntryCallbacks&) { + return MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::InCache, nullptr, host_info}; + })); + + EXPECT_CALL(*dns_cache_manager_->dns_cache_, getHost(_)) + .WillOnce( + Invoke([&](absl::string_view) + -> absl::optional { + return host_info; + })); + + EXPECT_CALL(*host_info, address()); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + + // We do not expect FilterState to be 
populated + EXPECT_FALSE( + filter_state->hasData(StreamInfo::UpstreamAddress::key())); + + filter_->onDestroy(); +} + } // namespace } // namespace DynamicForwardProxy } // namespace HttpFilters diff --git a/test/extensions/filters/http/dynamo/config_test.cc b/test/extensions/filters/http/dynamo/config_test.cc index aca5c3a776546..36abb93545c69 100644 --- a/test/extensions/filters/http/dynamo/config_test.cc +++ b/test/extensions/filters/http/dynamo/config_test.cc @@ -26,11 +26,12 @@ TEST(DynamoFilterConfigTest, DynamoFilter) { cb(filter_callback); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(DynamoFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.http_dynamo_filter"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index 6076c5d7b3cfe..4bdae57469fe1 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ b/test/extensions/filters/http/ext_authz/BUILD @@ -46,8 +46,12 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_names = ["envoy.filters.http.ext_authz"], deps = [ + "//source/common/grpc:async_client_manager_lib", + "//source/common/network:address_lib", + "//source/common/thread_local:thread_local_lib", "//source/extensions/filters/http/ext_authz:config", "//test/mocks/server:factory_context_mocks", + "//test/test_common:real_threads_test_helper_lib", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 
25f626a7747b8..da59c41c5125e 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -3,10 +3,13 @@ #include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.validate.h" #include "envoy/stats/scope.h" +#include "source/common/grpc/async_client_manager_impl.h" +#include "source/common/network/address_impl.h" +#include "source/common/thread_local/thread_local_impl.h" #include "source/extensions/filters/http/ext_authz/config.h" #include "test/mocks/server/factory_context.h" -#include "test/test_common/test_runtime.h" +#include "test/test_common/real_threads_test_helper.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -14,84 +17,104 @@ using testing::_; using testing::Invoke; +using testing::NiceMock; +using testing::StrictMock; namespace Envoy { namespace Extensions { namespace HttpFilters { namespace ExtAuthz { -namespace { - -void expectCorrectProtoGrpc(std::string const& grpc_service_yaml) { - ExtAuthzFilterConfig factory; - ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml(grpc_service_yaml, *proto_config); - - testing::StrictMock context; - testing::StrictMock server_context; - EXPECT_CALL(context, getServerFactoryContext()) - .WillRepeatedly(testing::ReturnRef(server_context)); - EXPECT_CALL(context, messageValidationVisitor()); - EXPECT_CALL(context, clusterManager()).Times(2); - EXPECT_CALL(context, runtime()); - EXPECT_CALL(context, scope()).Times(3); - - Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); - Http::MockFilterChainFactoryCallbacks filter_callback; - EXPECT_CALL(filter_callback, addStreamFilter(_)); - // Expect the raw async client to be created inside the callback. - // The creation of the filter callback is in main thread while the execution of callback is in - // worker thread. 
Because of the thread local cache of async client, it must be created in worker - // thread inside the callback. - EXPECT_CALL(context.cluster_manager_.async_client_manager_, getOrCreateRawAsyncClient(_, _, _, _)) - .WillOnce(Invoke( - [](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool, Grpc::CacheOption) { - return std::make_unique>(); - })); - cb(filter_callback); - - Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([&context, cb]() { - Http::MockFilterChainFactoryCallbacks filter_callback; - EXPECT_CALL(filter_callback, addStreamFilter(_)); - // Execute the filter factory callback in another thread. - EXPECT_CALL(context.cluster_manager_.async_client_manager_, - getOrCreateRawAsyncClient(_, _, _, _)) - .WillOnce(Invoke( - [](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool, - Grpc::CacheOption) { return std::make_unique>(); })); - cb(filter_callback); - }); - thread->join(); -} -} // namespace +class TestAsyncClientManagerImpl : public Grpc::AsyncClientManagerImpl { +public: + TestAsyncClientManagerImpl(Upstream::ClusterManager& cm, ThreadLocal::Instance& tls, + TimeSource& time_source, Api::Api& api, + const Grpc::StatNames& stat_names) + : Grpc::AsyncClientManagerImpl(cm, tls, time_source, api, stat_names) {} + Grpc::AsyncClientFactoryPtr factoryForGrpcService(const envoy::config::core::v3::GrpcService&, + Stats::Scope&, bool) override { + return std::make_unique>(); + } +}; -TEST(HttpExtAuthzConfigTest, CorrectProtoGoogleGrpc) { - std::string google_grpc_service_yaml = R"EOF( - transport_api_version: V3 - grpc_service: - google_grpc: - target_uri: ext_authz_server - stat_prefix: google - failure_mode_allow: false - transport_api_version: V3 - )EOF"; - expectCorrectProtoGrpc(google_grpc_service_yaml); -} +class ExtAuthzFilterTest : public Event::TestUsingSimulatedTime, + public Thread::RealThreadsTestHelper, + public testing::Test { +public: + ExtAuthzFilterTest() : RealThreadsTestHelper(5), 
stat_names_(symbol_table_) { + runOnMainBlocking([&]() { + async_client_manager_ = std::make_unique( + context_.cluster_manager_, tls(), api().timeSource(), api(), stat_names_); + }); + } -TEST(HttpExtAuthzConfigTest, CorrectProtoEnvoyGrpc) { - std::string envoy_grpc_service_yaml = R"EOF( - transport_api_version: V3 - grpc_service: - envoy_grpc: - cluster_name: ext_authz_server - failure_mode_allow: false - transport_api_version: V3 - )EOF"; - expectCorrectProtoGrpc(envoy_grpc_service_yaml); -} + ~ExtAuthzFilterTest() override { + // Reset the async client manager before shutdown threading. + // Because its dtor will try to post to event loop to clear thread local slot. + runOnMainBlocking([&]() { async_client_manager_.reset(); }); + // TODO(chaoqin-li1123): clean this up when we figure out how to free the threading resources in + // RealThreadsTestHelper. + shutdownThreading(); + exitThreads(); + } + + Http::FilterFactoryCb createFilterFactory( + const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& ext_authz_config) { + // Delegate call to mock async client manager to real async client manager. 
+ ON_CALL(context_, getServerFactoryContext()).WillByDefault(testing::ReturnRef(server_context_)); + ON_CALL(context_.cluster_manager_.async_client_manager_, getOrCreateRawAsyncClient(_, _, _, _)) + .WillByDefault(Invoke([&](const envoy::config::core::v3::GrpcService& config, + Stats::Scope& scope, bool skip_cluster_check, + Grpc::CacheOption cache_option) { + return async_client_manager_->getOrCreateRawAsyncClient(config, scope, skip_cluster_check, + cache_option); + })); + ExtAuthzFilterConfig factory; + return factory.createFilterFactoryFromProto(ext_authz_config, "stats", context_); + } + + Http::StreamFilterSharedPtr createFilterFromFilterFactory(Http::FilterFactoryCb filter_factory) { + StrictMock filter_callbacks; + + Http::StreamFilterSharedPtr filter; + EXPECT_CALL(filter_callbacks, addStreamFilter(_)).WillOnce(::testing::SaveArg<0>(&filter)); + filter_factory(filter_callbacks); + return filter; + } + +private: + NiceMock server_context_; + Stats::SymbolTableImpl symbol_table_; + Grpc::StatNames stat_names_; -TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { - std::string yaml = R"EOF( +protected: + NiceMock context_; + std::unique_ptr async_client_manager_; +}; + +class ExtAuthzFilterHttpTest : public ExtAuthzFilterTest { +public: + void testFilterFactory(const std::string& ext_authz_config_yaml) { + envoy::extensions::filters::http::ext_authz::v3::ExtAuthz ext_authz_config; + Http::FilterFactoryCb filter_factory; + // Load config and create filter factory in main thread. + runOnMainBlocking([&]() { + TestUtility::loadFromYaml(ext_authz_config_yaml, ext_authz_config); + filter_factory = createFilterFactory(ext_authz_config); + }); + + // Create filter from filter factory per thread. 
+ for (int i = 0; i < 5; i++) { + runOnAllWorkersBlocking([&, filter_factory]() { + Http::StreamFilterSharedPtr filter = createFilterFromFilterFactory(filter_factory); + EXPECT_NE(filter, nullptr); + }); + } + } +}; + +TEST_F(ExtAuthzFilterHttpTest, ExtAuthzFilterFactoryTestHttp) { + const std::string ext_authz_config_yaml = R"EOF( stat_prefix: "wall" transport_api_version: V3 http_service: @@ -132,29 +155,98 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { max_request_bytes: 100 pack_as_bytes: true )EOF"; + testFilterFactory(ext_authz_config_yaml); +} + +class ExtAuthzFilterGrpcTest : public ExtAuthzFilterTest { +public: + void testFilterFactoryAndFilterWithGrpcClient(const std::string& ext_authz_config_yaml) { + envoy::extensions::filters::http::ext_authz::v3::ExtAuthz ext_authz_config; + Http::FilterFactoryCb filter_factory; + runOnMainBlocking([&]() { + TestUtility::loadFromYaml(ext_authz_config_yaml, ext_authz_config); + filter_factory = createFilterFactory(ext_authz_config); + }); + + int request_sent_per_thread = 5; + // Initialize address instance to prepare for grpc traffic. + initAddress(); + // Create filter from filter factory per thread and send grpc request. 
+ for (int i = 0; i < request_sent_per_thread; i++) { + runOnAllWorkersBlocking([&, filter_factory]() { + Http::StreamFilterSharedPtr filter = createFilterFromFilterFactory(filter_factory); + testExtAuthzFilter(filter); + }); + } + runOnAllWorkersBlocking( + [&]() { expectGrpcClientSentRequest(ext_authz_config, request_sent_per_thread); }); + } + +private: + void initAddress() { + addr_ = std::make_shared("1.2.3.4", 1111); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); + } + + void testExtAuthzFilter(Http::StreamFilterSharedPtr filter) { + EXPECT_NE(filter, nullptr); + Http::TestRequestHeaderMapImpl request_headers; + NiceMock decoder_callbacks; + ON_CALL(decoder_callbacks, connection()).WillByDefault(Return(&connection_)); + filter->setDecoderFilterCallbacks(decoder_callbacks); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter->decodeHeaders(request_headers, false)); + std::shared_ptr decoder_filter = filter; + decoder_filter->onDestroy(); + } + + void expectGrpcClientSentRequest( + const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& ext_authz_config, + int requests_sent_per_thread) { + Grpc::RawAsyncClientSharedPtr async_client = async_client_manager_->getOrCreateRawAsyncClient( + ext_authz_config.grpc_service(), context_.scope(), false, Grpc::CacheOption::AlwaysCache); + Grpc::MockAsyncClient* mock_async_client = + dynamic_cast(async_client.get()); + EXPECT_NE(mock_async_client, nullptr); + // All the request in this thread should be sent through the same async client because the async + // client is cached. 
+ EXPECT_EQ(mock_async_client->send_count_, requests_sent_per_thread); + } + + Network::Address::InstanceConstSharedPtr addr_; + NiceMock connection_; +}; - ExtAuthzFilterConfig factory; - ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml(yaml, *proto_config); - testing::StrictMock context; - testing::StrictMock server_context; - EXPECT_CALL(context, getServerFactoryContext()) - .WillRepeatedly(testing::ReturnRef(server_context)); - EXPECT_CALL(context, messageValidationVisitor()); - EXPECT_CALL(context, clusterManager()); - EXPECT_CALL(context, runtime()); - EXPECT_CALL(context, scope()); - Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); - testing::StrictMock filter_callback; - EXPECT_CALL(filter_callback, addStreamFilter(_)); - cb(filter_callback); +TEST_F(ExtAuthzFilterGrpcTest, EnvoyGrpc) { + const std::string ext_authz_config_yaml = R"EOF( + transport_api_version: V3 + grpc_service: + envoy_grpc: + cluster_name: test_cluster + failure_mode_allow: false + )EOF"; + testFilterFactoryAndFilterWithGrpcClient(ext_authz_config_yaml); +} + +TEST_F(ExtAuthzFilterGrpcTest, GoogleGrpc) { + const std::string ext_authz_config_yaml = R"EOF( + transport_api_version: V3 + grpc_service: + google_grpc: + target_uri: ext_authz_server + stat_prefix: google + failure_mode_allow: false + )EOF"; + testFilterFactoryAndFilterWithGrpcClient(ext_authz_config_yaml); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.ext_authz"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index 50600d0e7bccb..2f27c478f61bb 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -188,7 +188,11 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, result = upstream_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"replaceable", "set-by-upstream"}, + {"set-cookie", "cookie1=snickerdoodle"}}, + false); upstream_request_->encodeData(response_size_, true); for (const auto& header_to_add : headers_to_add) { @@ -256,7 +260,8 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, const Headers& headers_to_remove, const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, const Http::TestRequestHeaderMapImpl& headers_to_append_multiple, - const Headers& response_headers_to_add) { + const Headers& response_headers_to_append, + const Headers& response_headers_to_set = {}) { ext_authz_request_->startGrpcStream(); envoy::service::auth::v3::CheckResponse check_response; check_response.mutable_status()->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); @@ -306,17 +311,29 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, return Http::HeaderMap::Iterate::Continue; }); 
- for (const auto& response_header_to_add : response_headers_to_add) { + for (const auto& response_header_to_add : response_headers_to_append) { auto* entry = check_response.mutable_ok_response()->mutable_response_headers_to_add()->Add(); const auto key = std::string(response_header_to_add.first); const auto value = std::string(response_header_to_add.second); - entry->mutable_append()->set_value(false); + entry->mutable_append()->set_value(true); entry->mutable_header()->set_key(key); entry->mutable_header()->set_value(value); ENVOY_LOG_MISC(trace, "sendExtAuthzResponse: set response_header_to_add {}={}", key, value); } + for (const auto& response_header_to_set : response_headers_to_set) { + auto* entry = check_response.mutable_ok_response()->mutable_response_headers_to_add()->Add(); + const auto key = std::string(response_header_to_set.first); + const auto value = std::string(response_header_to_set.second); + + // Replaces the one sent by the upstream. + entry->mutable_append()->set_value(false); + entry->mutable_header()->set_key(key); + entry->mutable_header()->set_value(value); + ENVOY_LOG_MISC(trace, "sendExtAuthzResponse: set response_header_to_set {}={}", key, value); + } + ext_authz_request_->sendGrpcMessage(check_response); ext_authz_request_->finishGrpcStream(Grpc::Status::Ok); } @@ -674,18 +691,27 @@ TEST_P(ExtAuthzGrpcIntegrationTest, DownstreamHeadersOnSuccess) { waitForExtAuthzRequest(expectedCheckRequest(Http::CodecType::HTTP1)); // Send back an ext_authz response with response_headers_to_add set. 
- sendExtAuthzResponse(Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{}, - Http::TestRequestHeaderMapImpl{}, - Headers{{"downstream2", "downstream-should-see-me"}}); + sendExtAuthzResponse( + Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}, + Headers{{"downstream2", "downstream-should-see-me"}, {"set-cookie", "cookie2=gingerbread"}}, + Headers{{"replaceable", "by-ext-authz"}}); // Wait for the upstream response. waitForSuccessfulUpstreamResponse("200"); + EXPECT_EQ(Http::HeaderUtility::getAllOfHeaderAsString(response_->headers(), + Http::LowerCaseString("set-cookie")) + .result() + .value(), + "cookie1=snickerdoodle,cookie2=gingerbread"); + // Verify the response is HTTP 200 with the header from `response_headers_to_add` above. const std::string expected_body(response_size_, 'a'); verifyResponse(std::move(response_), "200", Http::TestResponseHeaderMapImpl{{":status", "200"}, - {"downstream2", "downstream-should-see-me"}}, + {"downstream2", "downstream-should-see-me"}, + {"replaceable", "by-ext-authz"}}, expected_body); cleanup(); } diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 18283f359bc95..8f3033bd225a9 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -70,6 +70,49 @@ template class HttpFilterTestBase : public T { connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); } + void queryParameterTest(const std::string& original_path, const std::string& expected_path, + const Http::Utility::QueryParamsVector& add_me, + const std::vector& remove_me) { + InSequence s; + + // Set up all the typical headers plus a path with a query string that we'll remove later. 
+ request_headers_.addCopy(Http::Headers::get().Host, "example.com"); + request_headers_.addCopy(Http::Headers::get().Method, "GET"); + request_headers_.addCopy(Http::Headers::get().Path, original_path); + request_headers_.addCopy(Http::Headers::get().Scheme, "https"); + + prepareCheck(); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.query_parameters_to_set = add_me; + response.query_parameters_to_remove = remove_me; + + auto response_ptr = std::make_unique(response); + + EXPECT_CALL(*client_, check(_, _, _, _)) + .WillOnce(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, + const envoy::service::auth::v3::CheckRequest&, Tracing::Span&, + const StreamInfo::StreamInfo&) -> void { + callbacks.onComplete(std::move(response_ptr)); + })); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(request_headers_.getPathValue(), expected_path); + + Buffer::OwnedImpl response_data{}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseTrailerMapImpl response_trailers{}; + Http::MetadataMap response_metadata{}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(response_metadata)); + } + NiceMock stats_store_; envoy::config::bootstrap::v3::Bootstrap 
bootstrap_; FilterConfigSharedPtr config_; @@ -1744,8 +1787,13 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { response.headers_to_append = Http::HeaderVector{{request_header_key, "bar"}}; response.headers_to_set = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; response.headers_to_remove = std::vector{key_to_remove}; + // This cookie will be appended to the encoded headers. response.response_headers_to_add = - Http::HeaderVector{{Http::LowerCaseString{"cookie"}, "flavor=gingerbread"}}; + Http::HeaderVector{{Http::LowerCaseString{"set-cookie"}, "cookie2=gingerbread"}}; + // This "should-be-overridden" header value from the auth server will override the + // "should-be-overridden" entry from the upstream server. + response.response_headers_to_set = Http::HeaderVector{ + {Http::LowerCaseString{"should-be-overridden"}, "finally-set-by-auth-server"}}; auto response_ptr = std::make_unique(response); @@ -1766,14 +1814,70 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { EXPECT_EQ(request_headers_.has(key_to_remove), false); Buffer::OwnedImpl response_data{}; - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, + {"set-cookie", "cookie1=snickerdoodle"}, + {"should-be-overridden", "originally-set-by-upstream"}}; Http::TestResponseTrailerMapImpl response_trailers{}; Http::MetadataMap response_metadata{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(response_metadata)); - EXPECT_EQ(response_headers.get_("cookie"), "flavor=gingerbread"); + EXPECT_EQ(Http::HeaderUtility::getAllOfHeaderAsString(response_headers, + 
Http::LowerCaseString("set-cookie")) + .result() + .value(), + "cookie1=snickerdoodle,cookie2=gingerbread"); + EXPECT_EQ(response_headers.get_("should-be-overridden"), "finally-set-by-auth-server"); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithUnmodifiedQueryParameters) { + const std::string original_path{"/users?leave-me=alone"}; + const std::string expected_path{"/users?leave-me=alone"}; + const Http::Utility::QueryParamsVector add_me{}; + const std::vector remove_me{"remove-me"}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithAddedQueryParameters) { + const std::string original_path{"/users"}; + const std::string expected_path{"/users?add-me=123"}; + const Http::Utility::QueryParamsVector add_me{{"add-me", "123"}}; + const std::vector remove_me{}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithAddedAndRemovedQueryParameters) { + const std::string original_path{"/users?remove-me=123"}; + const std::string expected_path{"/users?add-me=456"}; + const Http::Utility::QueryParamsVector add_me{{"add-me", "456"}}; + const std::vector remove_me{{"remove-me"}}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithRemovedQueryParameters) { + const std::string original_path{"/users?remove-me=definitely"}; + const std::string expected_path{"/users"}; + const Http::Utility::QueryParamsVector add_me{}; + const std::vector remove_me{{"remove-me"}}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithOverwrittenQueryParameters) { + const std::string original_path{"/users?overwrite-me=original"}; + const std::string expected_path{"/users?overwrite-me=new"}; + const Http::Utility::QueryParamsVector add_me{{"overwrite-me", "new"}}; + const std::vector 
remove_me{}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithManyModifiedQueryParameters) { + const std::string original_path{"/users?remove-me=1&overwrite-me=2&leave-me=3"}; + const std::string expected_path{"/users?add-me=9&leave-me=3&overwrite-me=new"}; + const Http::Utility::QueryParamsVector add_me{{"add-me", "9"}, {"overwrite-me", "new"}}; + const std::vector remove_me{{"remove-me"}}; + queryParameterTest(original_path, expected_path, add_me, remove_me); } // Test that an synchronous denied response from the authorization service, on the call stack, diff --git a/test/extensions/filters/http/ext_proc/BUILD b/test/extensions/filters/http/ext_proc/BUILD index 94a5abbb88ff2..cbe1ca689ac89 100644 --- a/test/extensions/filters/http/ext_proc/BUILD +++ b/test/extensions/filters/http/ext_proc/BUILD @@ -38,7 +38,7 @@ envoy_extension_cc_test( "//test/mocks/event:event_mocks", "//test/mocks/server:factory_context_mocks", "//test/test_common:test_runtime_lib", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -105,8 +105,8 @@ envoy_extension_cc_test( "//test/common/http:common_lib", "//test/integration:http_integration_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -124,8 +124,8 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/test_common:utility_lib", "@com_google_absl//absl/strings:str_format", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + 
"@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -139,8 +139,8 @@ envoy_extension_cc_test_library( "//test/test_common:network_utility_lib", "@com_github_grpc_grpc//:grpc++", "@com_google_absl//absl/strings:str_format", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_grpc", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_grpc", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -182,8 +182,8 @@ envoy_extension_cc_test_library( "//test/test_common:utility_lib", "@com_github_grpc_grpc//:grpc++", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -202,8 +202,8 @@ envoy_cc_fuzz_test( "//test/integration:http_integration_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/ext_proc/client_test.cc b/test/extensions/filters/http/ext_proc/client_test.cc index 07f0ecbb3bc2d..a95573d1dea51 100644 --- a/test/extensions/filters/http/ext_proc/client_test.cc +++ b/test/extensions/filters/http/ext_proc/client_test.cc @@ -6,12 +6,13 @@ #include "test/mocks/grpc/mocks.h" #include "test/mocks/stats/mocks.h" +#include "test/mocks/stream_info/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using 
envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using testing::Invoke; using testing::Unused; @@ -32,23 +33,16 @@ class ExtProcStreamTest : public testing::Test, public ExternalProcessorCallback auto grpc = service.mutable_envoy_grpc(); grpc->set_cluster_name("test"); - EXPECT_CALL(client_manager_, factoryForGrpcService(_, _, _)) + EXPECT_CALL(client_manager_, getOrCreateRawAsyncClient(_, _, _, _)) .WillOnce(Invoke(this, &ExtProcStreamTest::doFactory)); client_ = std::make_unique(client_manager_, service, stats_store_); } - Grpc::AsyncClientFactoryPtr doFactory(Unused, Unused, Unused) { - auto factory = std::make_unique(); - EXPECT_CALL(*factory, createUncachedRawAsyncClient()) - .WillOnce(Invoke(this, &ExtProcStreamTest::doCreate)); - return factory; - } - - Grpc::RawAsyncClientPtr doCreate() { - auto async_client = std::make_unique(); + Grpc::RawAsyncClientSharedPtr doFactory(Unused, Unused, Unused, Unused) { + auto async_client = std::make_shared(); EXPECT_CALL(*async_client, - startRaw("envoy.service.ext_proc.v3alpha.ExternalProcessor", "Process", _, _)) + startRaw("envoy.service.ext_proc.v3.ExternalProcessor", "Process", _, _)) .WillOnce(Invoke(this, &ExtProcStreamTest::doStartRaw)); return async_client; } @@ -76,35 +70,38 @@ class ExtProcStreamTest : public testing::Test, public ExternalProcessorCallback Grpc::MockAsyncClientManager client_manager_; Grpc::MockAsyncStream stream_; Grpc::RawAsyncStreamCallbacks* stream_callbacks_; + testing::NiceMock stream_info_; testing::NiceMock stats_store_; }; TEST_F(ExtProcStreamTest, OpenCloseStream) { - auto stream = client_->start(*this); + auto stream = client_->start(*this, stream_info_); EXPECT_CALL(stream_, closeStream()); + EXPECT_CALL(stream_, resetStream()); stream->close(); } TEST_F(ExtProcStreamTest, SendToStream) { - auto stream = client_->start(*this); + auto stream = client_->start(*this, 
stream_info_); // Send something and ensure that we get it. Doesn't really matter what. EXPECT_CALL(stream_, sendMessageRaw_(_, false)); ProcessingRequest req; stream->send(std::move(req), false); EXPECT_CALL(stream_, closeStream()); + EXPECT_CALL(stream_, resetStream()); stream->close(); } TEST_F(ExtProcStreamTest, SendAndClose) { - auto stream = client_->start(*this); + auto stream = client_->start(*this, stream_info_); EXPECT_CALL(stream_, sendMessageRaw_(_, true)); ProcessingRequest req; stream->send(std::move(req), true); } TEST_F(ExtProcStreamTest, ReceiveFromStream) { - auto stream = client_->start(*this); + auto stream = client_->start(*this, stream_info_); ASSERT_NE(stream_callbacks_, nullptr); // Send something and ensure that we get it. Doesn't really matter what. ProcessingResponse resp; @@ -129,11 +126,12 @@ TEST_F(ExtProcStreamTest, ReceiveFromStream) { stream_callbacks_->onReceiveTrailingMetadata(std::move(empty_response_trailers)); EXPECT_CALL(stream_, closeStream()); + EXPECT_CALL(stream_, resetStream()); stream->close(); } TEST_F(ExtProcStreamTest, StreamClosed) { - auto stream = client_->start(*this); + auto stream = client_->start(*this, stream_info_); ASSERT_NE(stream_callbacks_, nullptr); EXPECT_FALSE(last_response_); EXPECT_FALSE(grpc_closed_); @@ -146,7 +144,7 @@ TEST_F(ExtProcStreamTest, StreamClosed) { } TEST_F(ExtProcStreamTest, StreamError) { - auto stream = client_->start(*this); + auto stream = client_->start(*this, stream_info_); ASSERT_NE(stream_callbacks_, nullptr); EXPECT_FALSE(last_response_); EXPECT_FALSE(grpc_closed_); diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc index c03cafa52ada7..3e8f1af52e334 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc @@ -18,8 +18,8 @@ // 7. 
Remove locks after crash is addressed by separate issue #include "envoy/config/core/v3/base.pb.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/type/v3/http_status.pb.h" #include "source/common/network/address_impl.h" @@ -36,9 +36,9 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; // The buffer size for the listeners static const uint32_t BufferSize = 100000; @@ -216,7 +216,7 @@ class ExtProcIntegrationFuzz : public HttpIntegrationTest, } } - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config_{}; TestProcessor test_processor_; Network::Address::IpVersion ip_version_; Grpc::ClientType client_type_; diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc index b5d5e112dffe4..336f0a4364f4d 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc @@ -1,8 +1,8 @@ #include "test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h" #include "envoy/config/core/v3/base.pb.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include 
"envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/type/v3/http_status.pb.h" #include "source/common/common/thread.h" @@ -16,12 +16,12 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeaderMutation; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeaderMutation; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using envoy::type::v3::StatusCode; const StatusCode HttpStatusCodes[] = { diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h index 1b5a6359dea43..30db02579306d 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h @@ -1,8 +1,8 @@ #pragma once #include "envoy/config/core/v3/base.pb.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/type/v3/http_status.pb.h" #include "source/common/common/thread.h" @@ -19,12 +19,12 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using 
envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeaderMutation; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeaderMutation; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using envoy::type::v3::StatusCode; const uint32_t ExtProcFuzzMaxDataSize = 1024; diff --git a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc index 7dc977d926406..aeffd710edc20 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc @@ -1,8 +1,8 @@ #include -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" #include "envoy/network/address.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/extensions/filters/http/ext_proc/config.h" @@ -17,21 +17,21 @@ namespace Envoy { using envoy::config::route::v3::Route; using envoy::config::route::v3::VirtualHost; -using envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; using 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager; using Envoy::Protobuf::MapPair; using Envoy::ProtobufWkt::Any; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeadersResponse; -using envoy::service::ext_proc::v3alpha::HttpBody; -using envoy::service::ext_proc::v3alpha::HttpHeaders; -using envoy::service::ext_proc::v3alpha::HttpTrailers; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; -using envoy::service::ext_proc::v3alpha::TrailersResponse; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeadersResponse; +using envoy::service::ext_proc::v3::HttpBody; +using envoy::service::ext_proc::v3::HttpHeaders; +using envoy::service::ext_proc::v3::HttpTrailers; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; +using envoy::service::ext_proc::v3::TrailersResponse; using Extensions::HttpFilters::ExternalProcessing::HasNoHeader; using Extensions::HttpFilters::ExternalProcessing::HeaderProtosEqual; using Extensions::HttpFilters::ExternalProcessing::SingleHeaderValueIs; @@ -306,7 +306,7 @@ class ExtProcIntegrationTest : public HttpIntegrationTest, processor_stream_->sendGrpcMessage(response); } - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config_{}; FakeHttpConnectionPtr processor_connection_; FakeStreamPtr processor_stream_; }; diff --git a/test/extensions/filters/http/ext_proc/filter_test.cc b/test/extensions/filters/http/ext_proc/filter_test.cc index 
86f1803d088ac..386f7401d48ff 100644 --- a/test/extensions/filters/http/ext_proc/filter_test.cc +++ b/test/extensions/filters/http/ext_proc/filter_test.cc @@ -1,4 +1,4 @@ -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/extensions/filters/http/ext_proc/ext_proc.h" @@ -10,6 +10,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/stream_info/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/cluster_manager.h" #include "test/test_common/printers.h" @@ -24,15 +25,15 @@ namespace HttpFilters { namespace ExternalProcessing { namespace { -using envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeadersResponse; -using envoy::service::ext_proc::v3alpha::HttpBody; -using envoy::service::ext_proc::v3alpha::HttpHeaders; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeadersResponse; +using envoy::service::ext_proc::v3::HttpBody; +using envoy::service::ext_proc::v3::HttpHeaders; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Http::FilterDataStatus; using Http::FilterHeadersStatus; @@ -57,10 +58,11 @@ class HttpFilterTest : public testing::Test { void initialize(std::string&& yaml) { client_ = 
std::make_unique(); route_ = std::make_shared>(); - EXPECT_CALL(*client_, start(_)).WillOnce(Invoke(this, &HttpFilterTest::doStart)); + EXPECT_CALL(*client_, start(_, _)).WillOnce(Invoke(this, &HttpFilterTest::doStart)); EXPECT_CALL(encoder_callbacks_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(decoder_callbacks_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(Return(route_)); + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); EXPECT_CALL(dispatcher_, createTimer_(_)) .Times(AnyNumber()) .WillRepeatedly(Invoke([this](Unused) { @@ -75,7 +77,7 @@ class HttpFilterTest : public testing::Test { return timer; })); - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config{}; if (!yaml.empty()) { TestUtility::loadFromYaml(yaml, proto_config); } @@ -98,7 +100,7 @@ class HttpFilterTest : public testing::Test { } } - ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks) { + ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks, testing::Unused) { stream_callbacks_ = &callbacks; auto stream = std::make_unique(); @@ -244,6 +246,7 @@ class HttpFilterTest : public testing::Test { Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; Router::RouteConstSharedPtr route_; + testing::NiceMock stream_info_; Http::TestRequestHeaderMapImpl request_headers_; Http::TestResponseHeaderMapImpl response_headers_; Http::TestRequestTrailerMapImpl request_trailers_; diff --git a/test/extensions/filters/http/ext_proc/mock_server.h b/test/extensions/filters/http/ext_proc/mock_server.h index bb479d175e290..ea0875b01a2e7 100644 --- a/test/extensions/filters/http/ext_proc/mock_server.h +++ b/test/extensions/filters/http/ext_proc/mock_server.h @@ -13,14 +13,15 @@ class 
MockClient : public ExternalProcessorClient { public: MockClient(); ~MockClient() override; - MOCK_METHOD(ExternalProcessorStreamPtr, start, (ExternalProcessorCallbacks&)); + MOCK_METHOD(ExternalProcessorStreamPtr, start, + (ExternalProcessorCallbacks&, const StreamInfo::StreamInfo& stream_info)); }; class MockStream : public ExternalProcessorStream { public: MockStream(); ~MockStream() override; - MOCK_METHOD(void, send, (envoy::service::ext_proc::v3alpha::ProcessingRequest&&, bool)); + MOCK_METHOD(void, send, (envoy::service::ext_proc::v3::ProcessingRequest&&, bool)); MOCK_METHOD(bool, close, ()); }; diff --git a/test/extensions/filters/http/ext_proc/mutation_utils_test.cc b/test/extensions/filters/http/ext_proc/mutation_utils_test.cc index c617caa299d59..6b370811ac1eb 100644 --- a/test/extensions/filters/http/ext_proc/mutation_utils_test.cc +++ b/test/extensions/filters/http/ext_proc/mutation_utils_test.cc @@ -11,7 +11,7 @@ namespace HttpFilters { namespace ExternalProcessing { namespace { -using envoy::service::ext_proc::v3alpha::BodyMutation; +using envoy::service::ext_proc::v3::BodyMutation; using Http::LowerCaseString; @@ -53,7 +53,7 @@ TEST(MutationUtils, TestApplyMutations) { {"x-envoy-strange-thing", "No"}, }; - envoy::service::ext_proc::v3alpha::HeaderMutation mutation; + envoy::service::ext_proc::v3::HeaderMutation mutation; auto* s = mutation.add_set_headers(); s->mutable_append()->set_value(true); s->mutable_header()->set_key("x-append-this"); @@ -135,7 +135,7 @@ TEST(MutationUtils, TestApplyMutations) { TEST(MutationUtils, TestNonAppendableHeaders) { Http::TestRequestHeaderMapImpl headers; - envoy::service::ext_proc::v3alpha::HeaderMutation mutation; + envoy::service::ext_proc::v3::HeaderMutation mutation; auto* s = mutation.add_set_headers(); s->mutable_append()->set_value(true); s->mutable_header()->set_key(":path"); diff --git a/test/extensions/filters/http/ext_proc/ordering_test.cc b/test/extensions/filters/http/ext_proc/ordering_test.cc index 
edc477994513e..776184bb88286 100644 --- a/test/extensions/filters/http/ext_proc/ordering_test.cc +++ b/test/extensions/filters/http/ext_proc/ordering_test.cc @@ -9,6 +9,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/stream_info/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/cluster_manager.h" @@ -21,10 +22,10 @@ namespace HttpFilters { namespace ExternalProcessing { namespace { -using envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Event::MockTimer; using Http::FilterDataStatus; @@ -56,10 +57,11 @@ class OrderingTest : public testing::Test { void initialize(absl::optional> cb) { client_ = std::make_unique(); route_ = std::make_shared>(); - EXPECT_CALL(*client_, start(_)).WillOnce(Invoke(this, &OrderingTest::doStart)); + EXPECT_CALL(*client_, start(_, _)).WillOnce(Invoke(this, &OrderingTest::doStart)); EXPECT_CALL(encoder_callbacks_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(decoder_callbacks_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(Return(route_)); + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); ExternalProcessor proto_config; proto_config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("ext_proc_server"); @@ -75,7 +77,8 @@ class OrderingTest : public testing::Test { void TearDown() override { filter_->onDestroy(); } 
// Called by the "start" method on the stream by the filter - virtual ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks) { + virtual ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks, + const StreamInfo::StreamInfo&) { stream_callbacks_ = &callbacks; auto stream = std::make_unique(); EXPECT_CALL(*stream, send(_, _)).WillRepeatedly(Invoke(this, &OrderingTest::doSend)); @@ -205,6 +208,7 @@ class OrderingTest : public testing::Test { Router::RouteConstSharedPtr route_; Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; + testing::NiceMock stream_info_; Http::TestRequestHeaderMapImpl request_headers_; Http::TestResponseHeaderMapImpl response_headers_; Http::TestRequestTrailerMapImpl request_trailers_; @@ -215,7 +219,8 @@ class OrderingTest : public testing::Test { class FastFailOrderingTest : public OrderingTest { // All tests using this class have gRPC streams that will fail while being opened. 
- ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks) override { + ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks, + const StreamInfo::StreamInfo&) override { auto stream = std::make_unique(); EXPECT_CALL(*stream, close()); callbacks.onGrpcError(Grpc::Status::Internal); diff --git a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc index 4e47554838335..40a27786f665e 100644 --- a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc @@ -1,5 +1,5 @@ -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/common/common/hash.h" #include "source/common/network/address_impl.h" @@ -18,9 +18,9 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Http::LowerCaseString; @@ -69,6 +69,12 @@ class StreamingIntegrationTest : public HttpIntegrationTest, const auto addr = Network::Test::getCanonicalLoopbackAddress(ipVersion()); const auto addr_port = Network::Utility::getAddressWithPort(*addr, test_processor_.port()); setGrpcService(*proto_config_.mutable_grpc_service(), "ext_proc_server", addr_port); + // Insert some extra metadata. 
This ensures that we are actually passing the + // "stream info" from the original HTTP request all the way down to the + // ext_proc stream. + auto* metadata = proto_config_.mutable_grpc_service()->mutable_initial_metadata()->Add(); + metadata->set_key("x-request-id"); + metadata->set_value("%REQ(x-request-id)%"); // Merge the filter. envoy::config::listener::v3::Filter ext_proc_filter; @@ -123,7 +129,7 @@ class StreamingIntegrationTest : public HttpIntegrationTest, } TestProcessor test_processor_; - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config_{}; IntegrationStreamDecoderPtr client_response_; std::atomic processor_request_hash_; std::atomic processor_response_hash_; @@ -134,14 +140,15 @@ INSTANTIATE_TEST_SUITE_P(StreamingProtocols, StreamingIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS); // Send a body that's larger than the buffer limit, and have the processor return immediately -// after the headers come in. +// after the headers come in. Also check the metadata in this test. TEST_P(StreamingIntegrationTest, PostAndProcessHeadersOnly) { uint32_t num_chunks = 150; uint32_t chunk_size = 1000; // This starts the gRPC server in the background. It'll be shut down when we stop the tests. test_processor_.start( - ipVersion(), [](grpc::ServerReaderWriter* stream) { + ipVersion(), + [](grpc::ServerReaderWriter* stream) { // This is the same gRPC stream processing code that a "user" of ext_proc // would write. In this case, we expect to receive a request_headers // message, and then close the stream. @@ -154,12 +161,20 @@ TEST_P(StreamingIntegrationTest, PostAndProcessHeadersOnly) { stream->Write(header_resp); // Returning here closes the stream, unless we had an ASSERT failure // previously. + }, + [](grpc::ServerContext* ctx) { + // Verify that the metadata set in the grpc client configuration + // above is actually sent to our RPC. 
+ auto request_id = ctx->client_metadata().find("x-request-id"); + ASSERT_NE(request_id, ctx->client_metadata().end()); + EXPECT_EQ(request_id->second, "sent some metadata"); }); initializeConfig(); HttpIntegrationTest::initialize(); auto& encoder = sendClientRequestHeaders([num_chunks, chunk_size](Http::HeaderMap& headers) { headers.addCopy(LowerCaseString("expect_request_size_bytes"), num_chunks * chunk_size); + headers.addCopy(LowerCaseString("x-request-id"), "sent some metadata"); }); for (uint32_t i = 0; i < num_chunks; i++) { diff --git a/test/extensions/filters/http/ext_proc/test_processor.cc b/test/extensions/filters/http/ext_proc/test_processor.cc index 47267ce8024fd..26094afbd1314 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.cc +++ b/test/extensions/filters/http/ext_proc/test_processor.cc @@ -1,6 +1,6 @@ #include "test/extensions/filters/http/ext_proc/test_processor.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "test/test_common/network_utility.h" @@ -13,9 +13,12 @@ namespace HttpFilters { namespace ExternalProcessing { grpc::Status ProcessorWrapper::Process( - grpc::ServerContext*, - grpc::ServerReaderWriter* stream) { + grpc::ServerContext* ctx, + grpc::ServerReaderWriter* stream) { + if (context_callback_) { + (*context_callback_)(ctx); + } callback_(stream); if (testing::Test::HasFatalFailure()) { // This is not strictly necessary, but it may help in troubleshooting to @@ -26,8 +29,9 @@ grpc::Status ProcessorWrapper::Process( return grpc::Status::OK; } -void TestProcessor::start(const Network::Address::IpVersion ip_version, ProcessingFunc cb) { - wrapper_ = std::make_unique(cb); +void TestProcessor::start(const Network::Address::IpVersion ip_version, ProcessingFunc cb, + absl::optional context_cb) { + wrapper_ = std::make_unique(cb, context_cb); grpc::ServerBuilder builder; builder.RegisterService(wrapper_.get()); 
builder.AddListeningPort( diff --git a/test/extensions/filters/http/ext_proc/test_processor.h b/test/extensions/filters/http/ext_proc/test_processor.h index 17fae05c77fe1..bd0d8518b7bd9 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.h +++ b/test/extensions/filters/http/ext_proc/test_processor.h @@ -4,8 +4,8 @@ #include #include "envoy/network/address.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.grpc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.grpc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "grpc++/server.h" #include "gtest/gtest.h" @@ -17,24 +17,29 @@ namespace ExternalProcessing { // Implementations of this function are called for each gRPC stream sent // to the external processing server. -using ProcessingFunc = std::function*)>; +using ProcessingFunc = + std::function*)>; + +// An implementation of this function may be called so that a test may verify +// the gRPC context. +using ContextProcessingFunc = std::function; // An implementation of the ExternalProcessor service that may be included // in integration tests. -class ProcessorWrapper : public envoy::service::ext_proc::v3alpha::ExternalProcessor::Service { +class ProcessorWrapper : public envoy::service::ext_proc::v3::ExternalProcessor::Service { public: - ProcessorWrapper(ProcessingFunc& cb) : callback_(cb) {} + ProcessorWrapper(ProcessingFunc& cb, absl::optional context_cb) + : callback_(cb), context_callback_(context_cb) {} - grpc::Status - Process(grpc::ServerContext*, - grpc::ServerReaderWriter* stream) - override; + grpc::Status Process( + grpc::ServerContext*, + grpc::ServerReaderWriter* stream) override; private: ProcessingFunc callback_; + absl::optional context_callback_; }; // This class starts a gRPC server supporting the ExternalProcessor service. 
@@ -45,7 +50,8 @@ class TestProcessor { // Start the processor listening on an ephemeral port (port 0) on the local host. // All new streams will be delegated to the specified function. The function // will be invoked in a background thread controlled by the gRPC server. - void start(const Network::Address::IpVersion ip_version, ProcessingFunc cb); + void start(const Network::Address::IpVersion ip_version, ProcessingFunc cb, + absl::optional context_cb = absl::nullopt); // Stop the processor from listening once all streams are closed, and exit // the listening threads. diff --git a/test/extensions/filters/http/fault/config_test.cc b/test/extensions/filters/http/fault/config_test.cc index 440bd0ccae85b..6bf877d09d2c8 100644 --- a/test/extensions/filters/http/fault/config_test.cc +++ b/test/extensions/filters/http/fault/config_test.cc @@ -69,11 +69,12 @@ TEST(FaultFilterConfigTest, FaultFilterEmptyProto) { cb(filter_callback); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(FaultFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.fault"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/grpc_http1_bridge/BUILD b/test/extensions/filters/http/grpc_http1_bridge/BUILD index 24139e98a4b58..1cd2e3dda5c23 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_bridge/BUILD @@ -35,3 +35,18 @@ envoy_extension_cc_test( "//test/mocks/server:factory_context_mocks", ], ) + +envoy_extension_cc_test( + name = "grpc_http1_bridge_integration_test", + srcs = [ + "grpc_http1_bridge_integration_test.cc", + ], + extension_names = ["envoy.filters.http.grpc_http1_bridge"], + deps = [ + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/extensions/filters/http/grpc_http1_bridge:config", + "//source/extensions/filters/http/health_check:config", + "//test/integration:http_integration_lib", + ], +) diff --git a/test/extensions/filters/http/grpc_http1_bridge/config_test.cc b/test/extensions/filters/http/grpc_http1_bridge/config_test.cc index b1dd30299e995..07214e4a544f8 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/config_test.cc +++ b/test/extensions/filters/http/grpc_http1_bridge/config_test.cc @@ -23,11 +23,12 @@ TEST(GrpcHttp1BridgeFilterConfigTest, GrpcHttp1BridgeFilter) { cb(filter_callback); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(GrpcHttp1BridgeFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.grpc_http1_bridge"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/grpc_http1_bridge/grpc_http1_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_bridge/grpc_http1_bridge_integration_test.cc new file mode 100644 index 0000000000000..f8721b0c3121e --- /dev/null +++ b/test/extensions/filters/http/grpc_http1_bridge/grpc_http1_bridge_integration_test.cc @@ -0,0 +1,53 @@ +#include "test/integration/http_integration.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +// A test class for testing HTTP/1.1 upstream and downstreams + +class GrpcIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { +public: + GrpcIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, GetParam()) {} +}; + +// Test hitting the bridge filter with too many response bytes to buffer. Given +// the headers are not proxied, the connection manager will send a local error reply. +TEST_P(GrpcIntegrationTest, HittingGrpcFilterLimitBufferingHeaders) { + config_helper_.prependFilter( + "{ name: grpc_http1_bridge, typed_config: { \"@type\": " + "type.googleapis.com/envoy.extensions.filters.http.grpc_http1_bridge.v3.Config } }"); + config_helper_.setBufferLimits(1024, 1024); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-type", "application/grpc"}, + {"x-envoy-retry-grpc-on", "cancelled"}}); + waitForNextUpstreamRequest(); + + // Send the overly large response. 
Because the grpc_http1_bridge filter buffers and buffer + // limits are exceeded, this will be translated into an unknown gRPC error. + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(1024 * 65, false); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().GrpcStatus, "2")); // Unknown gRPC error +} + +INSTANTIATE_TEST_SUITE_P(IpVersions, GrpcIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +} // namespace +} // namespace Envoy diff --git a/test/extensions/filters/http/grpc_json_transcoder/config_test.cc b/test/extensions/filters/http/grpc_json_transcoder/config_test.cc index 05186021d7606..690040c33b8d2 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/config_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/config_test.cc @@ -22,11 +22,12 @@ TEST(GrpcJsonTranscoderFilterConfigTest, ValidateFail) { ProtoValidationException); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(GrpcJsonTranscoderFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.grpc_json_transcoder"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 99baf6b1789b1..fccc8902d00c7 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -218,18 +218,47 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryPost) { R"({"id":"20","theme":"Children"})"); } +TEST_P(GrpcJsonTranscoderIntegrationTest, TestParamUnescapePlus) { + const std::string filter = + R"EOF( + name: grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + proto_descriptor : "{}" + services : "bookstore.Bookstore" + query_param_unescape_plus: true + )EOF"; + config_helper_.prependFilter( + fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); + HttpIntegrationTest::initialize(); + // Test '+', 'query_param_unescape_plus' is true, '-' is converted to space. 
+ testTranscoding( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/shelf?shelf.theme=Children+Books"}, + {":authority", "host"}, + {"content-type", "application/json"}}, + "", {R"(shelf { theme: "Children Books" })"}, {R"(id: 20 theme: "Children" )"}, Status(), + Http::TestResponseHeaderMapImpl{ + {":status", "200"}, + {"content-type", "application/json"}, + }, + R"({"id":"20","theme":"Children"})"); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, QueryParams) { HttpIntegrationTest::initialize(); // 1. Binding theme='Children' in CreateShelfRequest // Using the following HTTP template: // POST /shelves // body: shelf + + // Test '+', 'query_param_unescape_plus' is false by default, '-' is not converted to space. testTranscoding( Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/shelf?shelf.theme=Children"}, + {":path", "/shelf?shelf.theme=Children+Books"}, {":authority", "host"}, {"content-type", "application/json"}}, - "", {R"(shelf { theme: "Children" })"}, {R"(id: 20 theme: "Children" )"}, Status(), + "", {R"(shelf { theme: "Children+Books" })"}, {R"(id: 20 theme: "Children" )"}, Status(), Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"content-type", "application/json"}, diff --git a/test/extensions/filters/http/grpc_web/config_test.cc b/test/extensions/filters/http/grpc_web/config_test.cc index 32c7d656ec78c..400fec0fd1240 100644 --- a/test/extensions/filters/http/grpc_web/config_test.cc +++ b/test/extensions/filters/http/grpc_web/config_test.cc @@ -23,11 +23,12 @@ TEST(GrpcWebFilterConfigTest, GrpcWebFilter) { cb(filter_callback); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(GrpcWebFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.grpc_web"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/health_check/BUILD b/test/extensions/filters/http/health_check/BUILD index c72e32c8adbaf..74a5d771c601b 100644 --- a/test/extensions/filters/http/health_check/BUILD +++ b/test/extensions/filters/http/health_check/BUILD @@ -38,3 +38,17 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "health_check_integration_test", + srcs = [ + "health_check_integration_test.cc", + ], + extension_names = ["envoy.filters.http.health_check"], + deps = [ + "//source/extensions/filters/http/buffer:config", + "//source/extensions/filters/http/health_check:config", + "//test/integration:http_protocol_integration_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/health_check/config_test.cc b/test/extensions/filters/http/health_check/config_test.cc index 4faa78c630afe..3eb5204b0fff9 100644 --- a/test/extensions/filters/http/health_check/config_test.cc +++ b/test/extensions/filters/http/health_check/config_test.cc @@ -270,11 +270,12 @@ TEST(HealthCheckFilterConfig, HealthCheckFilterDuplicateNoMatch) { testHealthCheckHeaderMatch(config, headers, false); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(HealthCheckFilterConfig, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.health_check"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/health_check/health_check_integration_test.cc b/test/extensions/filters/http/health_check/health_check_integration_test.cc new file mode 100644 index 0000000000000..8c0f18d21231a --- /dev/null +++ b/test/extensions/filters/http/health_check/health_check_integration_test.cc @@ -0,0 +1,185 @@ +#include "test/integration/http_protocol_integration.h" + +using testing::HasSubstr; +using testing::Not; + +namespace Envoy { +namespace { + +class HealthCheckIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); + HttpProtocolIntegrationTest::initialize(); + } + absl::string_view request(const std::string port_key, const std::string method, + const std::string endpoint, BufferingStreamDecoderPtr& response) { + response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, endpoint, "", + downstreamProtocol(), version_); + EXPECT_TRUE(response->complete()); + return response->headers().getStatusValue(); + } +}; + +// Add a health check filter and verify correct behavior when draining. +TEST_P(HealthCheckIntegrationTest, DrainCloseGradual) { + // The probability of drain close increases over time. With a high timeout, + // the probability will be very low, but the rapid retries prevent this from + // increasing total test time. 
+ drain_time_ = std::chrono::seconds(100); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + while (!test_server_->counter("http.config_test.downstream_cx_drain_close")->value()) { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + } + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecType::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +TEST_P(HealthCheckIntegrationTest, DrainCloseImmediate) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(100); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if 
(downstream_protocol_ == Http::CodecType::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +// Add a health check filter and verify correct computation of health based on upstream status. +TEST_P(HealthCheckIntegrationTest, ComputedHealthCheck) { + config_helper_.prependFilter(R"EOF( +name: health_check +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + pass_through_mode: false + cluster_min_healthy_percentages: + example_cluster_name: { value: 75 } +)EOF"); + HttpProtocolIntegrationTest::initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +// Add a health check filter and verify correct computation of health based on upstream status. 
+TEST_P(HealthCheckIntegrationTest, ModifyBuffer) { + config_helper_.prependFilter(R"EOF( +name: health_check +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + pass_through_mode: false + cluster_min_healthy_percentages: + example_cluster_name: { value: 75 } +)EOF"); + HttpProtocolIntegrationTest::initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +TEST_P(HealthCheckIntegrationTest, HealthCheck) { + initialize(); + + BufferingStreamDecoderPtr response; + EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); + EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); + EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); +} + +TEST_P(HealthCheckIntegrationTest, HealthCheckWithoutServerStats) { + envoy::config::metrics::v3::StatsMatcher stats_matcher; + stats_matcher.mutable_exclusion_list()->add_patterns()->set_prefix("server."); + config_helper_.addConfigModifier( + [stats_matcher](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + *bootstrap.mutable_stats_config()->mutable_stats_matcher() = stats_matcher; + }); + initialize(); + + BufferingStreamDecoderPtr response; + EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); + EXPECT_EQ("200", request("admin", "GET", "/stats", response)); + EXPECT_THAT(response->body(), Not(HasSubstr("server."))); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); + EXPECT_EQ("503", 
request("http", "GET", "/healthcheck", response)); + EXPECT_EQ("200", request("admin", "GET", "/stats", response)); + EXPECT_THAT(response->body(), Not(HasSubstr("server."))); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); + EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); + EXPECT_EQ("200", request("admin", "GET", "/stats", response)); + EXPECT_THAT(response->body(), Not(HasSubstr("server."))); +} + +TEST_P(HealthCheckIntegrationTest, HealthCheckWithBufferFilter) { + config_helper_.prependFilter(ConfigHelper::defaultBufferFilter()); + initialize(); + + BufferingStreamDecoderPtr response; + EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, HealthCheckIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2}, + {Http::CodecType::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace +} // namespace Envoy diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc index 5c7c666af5dec..e4923236c688d 100644 --- a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc +++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc @@ -286,11 +286,12 @@ TEST_F(IpTaggingFilterTest, ClearRouteCache) { EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags)); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(IpTaggingFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.ip_tagging"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index b2b92cf75cdd2..ef39bdd17a357 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -61,13 +61,13 @@ class AuthenticatorTest : public testing::Test { std::function on_complete_cb = [&expected_status](const Status& status) { ASSERT_EQ(status, expected_status); }; - auto set_payload_cb = [this](const std::string& name, const ProtobufWkt::Struct& payload) { - out_name_ = name; - out_payload_ = payload; + auto set_extracted_jwt_data_cb = [this](const std::string& name, + const ProtobufWkt::Struct& extracted_data) { + this->addExtractedData(name, extracted_data); }; initTokenExtractor(); auto tokens = extractor_->extract(headers); - auth_->verify(headers, parent_span_, std::move(tokens), std::move(set_payload_cb), + auth_->verify(headers, parent_span_, std::move(tokens), std::move(set_extracted_jwt_data_cb), std::move(on_complete_cb)); } @@ -79,6 +79,12 @@ class AuthenticatorTest : public testing::Test { extractor_ = Extractor::create(providers); } + // This is like ContextImpl::addExtractedData in + // source/extensions/filters/http/jwt_authn/verifier.cc. 
+ void addExtractedData(const std::string& name, const ProtobufWkt::Struct& extracted_data) { + *(*out_extracted_data_.mutable_fields())[name].mutable_struct_value() = extracted_data; + } + JwtAuthentication proto_config_; ExtractorConstPtr extractor_; std::shared_ptr filter_config_; @@ -87,8 +93,7 @@ class AuthenticatorTest : public testing::Test { AuthenticatorPtr auth_; ::google::jwt_verify::JwksPtr jwks_; NiceMock mock_factory_ctx_; - std::string out_name_; - ProtobufWkt::Struct out_payload_; + ProtobufWkt::Struct out_extracted_data_; NiceMock parent_span_; }; @@ -149,14 +154,14 @@ TEST_F(AuthenticatorTest, TestForwardJwt) { // Verify the token is NOT removed. EXPECT_TRUE(headers.has(Http::CustomHeaders::get().Authorization)); - // Payload not set by default - EXPECT_EQ(out_name_, ""); + // Payload is not set by default. + EXPECT_TRUE(out_extracted_data_.fields().empty()); EXPECT_EQ(1U, filter_config_->stats().jwks_fetch_success_.value()); EXPECT_EQ(0U, filter_config_->stats().jwks_fetch_failed_.value()); } -// This test verifies the Jwt payload is set. +// This test verifies the JWT payload is set. TEST_F(AuthenticatorTest, TestSetPayload) { // Config payload_in_metadata flag (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( @@ -172,12 +177,74 @@ TEST_F(AuthenticatorTest, TestSetPayload) { expectVerifyStatus(Status::Ok, headers); - // Payload is set - EXPECT_EQ(out_name_, "my_payload"); + // Only one field is set. + EXPECT_EQ(1, out_extracted_data_.fields().size()); - ProtobufWkt::Struct expected_payload; + ProtobufWkt::Value expected_payload; TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); - EXPECT_TRUE(TestUtility::protoEqual(out_payload_, expected_payload)); + EXPECT_TRUE( + TestUtility::protoEqual(expected_payload, out_extracted_data_.fields().at("my_payload"))); +} + +// This test verifies setting only the extracted header to metadata. 
+TEST_F(AuthenticatorTest, TestSetHeader) { + // Set the extracted header to metadata. + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_header_in_metadata( + "my_header"); + + createAuthenticator(); + EXPECT_CALL(*raw_fetcher_, fetch(_, _)) + .WillOnce(Invoke([this](Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { + receiver.onJwksSuccess(std::move(jwks_)); + })); + + // Expect to have a valid JWT. + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + + expectVerifyStatus(Status::Ok, headers); + + // Only one field is set. + EXPECT_EQ(1, out_extracted_data_.fields().size()); + + // We should expect empty JWT payload. + ProtobufWkt::Value expected_payload; + TestUtility::loadFromJson(ExpectedHeaderJSON, expected_payload); + EXPECT_TRUE( + TestUtility::protoEqual(expected_payload, out_extracted_data_.fields().at("my_header"))); +} + +// This test verifies setting the extracted payload and header to metadata. +TEST_F(AuthenticatorTest, TestSetPayloadAndHeader) { + // Set the extracted payload and header to metadata. + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( + "my_payload"); + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_header_in_metadata( + "my_header"); + + createAuthenticator(); + EXPECT_CALL(*raw_fetcher_, fetch(_, _)) + .WillOnce(Invoke([this](Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { + receiver.onJwksSuccess(std::move(jwks_)); + })); + + // Expect to have a valid JWT. + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + + expectVerifyStatus(Status::Ok, headers); + + // Payload and header are set. + EXPECT_EQ(2, out_extracted_data_.fields().size()); + + // We should expect both JWT payload and header are set. 
+ ProtobufWkt::Value expected_payload; + TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); + EXPECT_TRUE( + TestUtility::protoEqual(expected_payload, out_extracted_data_.fields().at("my_payload"))); + + ProtobufWkt::Value expected_header; + TestUtility::loadFromJson(ExpectedHeaderJSON, expected_header); + EXPECT_TRUE( + TestUtility::protoEqual(expected_header, out_extracted_data_.fields().at("my_header"))); } // This test verifies the Jwt with non existing kid @@ -669,12 +736,14 @@ class AuthenticatorJwtCacheTest : public testing::Test { std::function on_complete_cb = [&expected_status](const Status& status) { ASSERT_EQ(status, expected_status); }; - auto set_payload_cb = [this](const std::string& name, const ProtobufWkt::Struct& payload) { + auto set_extracted_jwt_data_cb = [this](const std::string& name, + const ProtobufWkt::Struct& extracted_data) { out_name_ = name; - out_payload_ = payload; + out_extracted_data_ = extracted_data; }; auto tokens = extractor_->extract(headers); - auth_->verify(headers, parent_span_, std::move(tokens), set_payload_cb, on_complete_cb); + auth_->verify(headers, parent_span_, std::move(tokens), set_extracted_jwt_data_cb, + on_complete_cb); } ::google::jwt_verify::JwksPtr jwks_; @@ -686,7 +755,7 @@ class AuthenticatorJwtCacheTest : public testing::Test { ExtractorConstPtr extractor_; NiceMock parent_span_; std::string out_name_; - ProtobufWkt::Struct out_payload_; + ProtobufWkt::Struct out_extracted_data_; }; TEST_F(AuthenticatorJwtCacheTest, TestNonProvider) { @@ -751,7 +820,7 @@ TEST_F(AuthenticatorJwtCacheTest, TestCacheHit) { ProtobufWkt::Struct expected_payload; TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); - EXPECT_TRUE(TestUtility::protoEqual(out_payload_, expected_payload)); + EXPECT_TRUE(TestUtility::protoEqual(out_extracted_data_, expected_payload)); } } // namespace diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc 
b/test/extensions/filters/http/jwt_authn/extractor_test.cc index 2adaf35e7e463..0289e909c4958 100644 --- a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -286,16 +286,19 @@ TEST_F(ExtractorTest, TestCookieToken) { EXPECT_EQ(tokens[0]->token(), "token-cookie-value"); EXPECT_TRUE(tokens[0]->isIssuerAllowed("issuer9")); EXPECT_FALSE(tokens[0]->isIssuerAllowed("issuer10")); + tokens[0]->removeJwt(headers); // only issuer9 has specified "token-cookie-2" cookie location. EXPECT_EQ(tokens[1]->token(), "token-cookie-value-2"); EXPECT_TRUE(tokens[1]->isIssuerAllowed("issuer9")); EXPECT_FALSE(tokens[1]->isIssuerAllowed("issuer10")); + tokens[1]->removeJwt(headers); // only issuer10 has specified "token-cookie-3" cookie location. EXPECT_EQ(tokens[2]->token(), "token-cookie-value-3"); EXPECT_TRUE(tokens[2]->isIssuerAllowed("issuer10")); EXPECT_FALSE(tokens[2]->isIssuerAllowed("issuer9")); + tokens[2]->removeJwt(headers); } // Test extracting multiple tokens. diff --git a/test/extensions/filters/http/jwt_authn/filter_config_test.cc b/test/extensions/filters/http/jwt_authn/filter_config_test.cc index c651c932e6db6..a3c8343fc686c 100644 --- a/test/extensions/filters/http/jwt_authn/filter_config_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_config_test.cc @@ -175,7 +175,8 @@ TEST(HttpJwtAuthnFilterConfigTest, VerifyTLSLifetime) { // The threadLocal, dispatcher and api that are used by the filter config, actually belong to // the server factory context that who's lifetime is longer. We simulate that by returning // their instances from outside the scope. 
- ON_CALL(context, dispatcher()).WillByDefault(ReturnRef(server_context.dispatcher())); + ON_CALL(context, mainThreadDispatcher()) + .WillByDefault(ReturnRef(server_context.mainThreadDispatcher())); ON_CALL(context, api()).WillByDefault(ReturnRef(server_context.api())); ON_CALL(context, threadLocal()).WillByDefault(ReturnRef(server_context.threadLocal())); diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index 6afa8ac3e8ec4..17e84718d573b 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -72,7 +72,6 @@ class FilterTest : public testing::Test { NiceMock filter_callbacks_; std::unique_ptr filter_; std::unique_ptr mock_verifier_; - NiceMock verifier_callback_; Http::TestRequestTrailerMapImpl trailers_; std::shared_ptr> mock_route_; std::shared_ptr per_route_config_; @@ -143,21 +142,23 @@ TEST_F(FilterTest, CorsPreflightMssingAccessControlRequestMethod) { EXPECT_EQ(0U, mock_config_->stats().denied_.value()); } -// This test verifies the setPayload call is handled correctly -TEST_F(FilterTest, TestSetPayloadCall) { +// This test verifies the setExtractedData call is handled correctly +TEST_F(FilterTest, TestSetExtractedData) { setupMockConfig(); - ProtobufWkt::Struct payload; + ProtobufWkt::Struct extracted_data; // A successful authentication completed inline: callback is called inside verify(). 
- EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([&payload](ContextSharedPtr context) { - context->callback()->setPayload(payload); - context->callback()->onComplete(Status::Ok); - })); + EXPECT_CALL(*mock_verifier_, verify(_)) + .WillOnce(Invoke([&extracted_data](ContextSharedPtr context) { + context->callback()->setExtractedData(extracted_data); + context->callback()->onComplete(Status::Ok); + })); EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)) - .WillOnce(Invoke([&payload](const std::string& ns, const ProtobufWkt::Struct& out_payload) { - EXPECT_EQ(ns, "envoy.filters.http.jwt_authn"); - EXPECT_TRUE(TestUtility::protoEqual(out_payload, payload)); - })); + .WillOnce( + Invoke([&extracted_data](const std::string& ns, const ProtobufWkt::Struct& out_payload) { + EXPECT_EQ(ns, "envoy.filters.http.jwt_authn"); + EXPECT_TRUE(TestUtility::protoEqual(out_payload, extracted_data)); + })); auto headers = Http::TestRequestHeaderMapImpl{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); diff --git a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc index d68164e6ae11a..a9980189ef7fe 100644 --- a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc @@ -84,10 +84,11 @@ class GroupVerifierTest : public testing::Test { EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)) .WillOnce(Invoke([issuer = it.first, status = it.second]( Http::HeaderMap&, Tracing::Span&, std::vector*, - SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) { + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, + AuthenticatorCallback callback) { if (status == Status::Ok) { ProtobufWkt::Struct empty_struct; - set_payload_cb(issuer, empty_struct); + set_extracted_jwt_data_cb(issuer, empty_struct); } callback(status); })); @@ -97,9 +98,9 @@ class GroupVerifierTest : public 
testing::Test { createVerifier(); } - // This expected payload is only for createSyncMockAuthsAndVerifier() function - // which set an empty payload struct for each issuer. - static ProtobufWkt::Struct getExpectedPayload(const std::vector& issuers) { + // This expected extracted data is only for createSyncMockAuthsAndVerifier() function + // which set an empty extracted data struct for each issuer. + static ProtobufWkt::Struct getExpectedExtractedData(const std::vector& issuers) { ProtobufWkt::Struct struct_obj; auto* fields = struct_obj.mutable_fields(); for (const auto& issuer : issuers) { @@ -113,9 +114,9 @@ class GroupVerifierTest : public testing::Test { for (const auto& provider : providers) { auto mock_auth = std::make_unique(); EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)) - .WillOnce(Invoke([&, iss = provider](Http::HeaderMap&, Tracing::Span&, - std::vector*, - SetPayloadCallback, AuthenticatorCallback callback) { + .WillOnce(Invoke([&, iss = provider]( + Http::HeaderMap&, Tracing::Span&, std::vector*, + SetExtractedJwtDataCallback, AuthenticatorCallback callback) { callbacks_[iss] = std::move(callback); })); EXPECT_CALL(*mock_auth, onDestroy()); @@ -167,9 +168,11 @@ TEST_F(GroupVerifierTest, DeeplyNestedAnys) { TestUtility::loadFromYaml(config, proto_config_); createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"example_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual(extracted_data, + getExpectedExtractedData({"example_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -220,10 +223,11 @@ TEST_F(GroupVerifierTest, TestRequiresAll) { createSyncMockAuthsAndVerifier( 
StatusMap{{"example_provider", Status::Ok}, {"other_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual( - payload, getExpectedPayload({"example_provider", "other_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual( + extracted_data, getExpectedExtractedData({"example_provider", "other_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -241,8 +245,8 @@ TEST_F(GroupVerifierTest, TestRequiresAllBadFormat) { TestUtility::loadFromYaml(RequiresAllConfig, proto_config_); createAsyncMockAuthsAndVerifier(std::vector{"example_provider", "other_provider"}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtBadFormat)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -264,8 +268,8 @@ TEST_F(GroupVerifierTest, TestRequiresAllMissing) { TestUtility::loadFromYaml(RequiresAllConfig, proto_config_); createAsyncMockAuthsAndVerifier(std::vector{"example_provider", "other_provider"}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtMissed)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -287,8 +291,8 @@ TEST_F(GroupVerifierTest, TestRequiresAllBothFailed) { TestUtility::loadFromYaml(RequiresAllConfig, proto_config_); createAsyncMockAuthsAndVerifier(std::vector{"example_provider", "other_provider"}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtUnknownIssuer)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -307,9 +311,11 @@ TEST_F(GroupVerifierTest, TestRequiresAnyFirstAuthOK) { TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_); createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"example_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual(extracted_data, + getExpectedExtractedData({"example_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -328,9 +334,11 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastAuthOk) { createSyncMockAuthsAndVerifier( StatusMap{{"example_provider", Status::JwtUnknownIssuer}, {"other_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"other_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + 
EXPECT_TRUE( + TestUtility::protoEqual(extracted_data, getExpectedExtractedData({"other_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -351,8 +359,8 @@ TEST_F(GroupVerifierTest, TestRequiresAnyAllAuthFailed) { createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtMissed}, {"other_provider", Status::JwtHeaderBadKid}}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -375,8 +383,8 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtMissed) { createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtHeaderBadKid}, {"other_provider", Status::JwtMissed}}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -396,8 +404,8 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtUnknownIssuer) { createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtHeaderBadKid}, {"other_provider", Status::JwtUnknownIssuer}}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -415,9 +423,11 @@ TEST_F(GroupVerifierTest, TestAnyInAllFirstAnyIsOk) { TestUtility::loadFromYaml(AllWithAny, proto_config_); createSyncMockAuthsAndVerifier(StatusMap{{"provider_1", Status::Ok}, {"provider_3", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"provider_1", "provider_3"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual( + extracted_data, getExpectedExtractedData({"provider_1", "provider_3"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; @@ -433,9 +443,11 @@ TEST_F(GroupVerifierTest, TestAnyInAllLastAnyIsOk) { {"provider_2", Status::Ok}, {"provider_3", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"provider_2", "provider_3"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual( + extracted_data, getExpectedExtractedData({"provider_2", "provider_3"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; @@ -450,8 +462,8 @@ TEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyIsOk) { createAsyncMockAuthsAndVerifier( std::vector{"provider_1", "provider_2", "provider_3"}); - // AsyncMockVerifier doesn't set payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // AsyncMockVerifier doesn't set the extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); @@ -468,7 +480,7 @@ TEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyFailed) { createAsyncMockAuthsAndVerifier( std::vector{"provider_1", "provider_2", "provider_3"}); - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwksFetchFail)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); @@ -486,7 +498,7 @@ TEST_F(GroupVerifierTest, TestAllInAnyBothRequireAllFailed) { createSyncMockAuthsAndVerifier( StatusMap{{"provider_1", Status::JwksFetchFail}, {"provider_3", Status::JwtExpired}}); - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtExpired)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); @@ -500,8 +512,8 @@ TEST_F(GroupVerifierTest, TestAllInAnyFirstAllIsOk) { createAsyncMockAuthsAndVerifier( std::vector{"provider_1", "provider_2", "provider_3", "provider_4"}); - // AsyncMockVerifier doesn't set payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // AsyncMockVerifier doesn't set the extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); diff --git a/test/extensions/filters/http/jwt_authn/mock.h b/test/extensions/filters/http/jwt_authn/mock.h index ff235c5aed00d..7b12ec7ed4dde 100644 --- a/test/extensions/filters/http/jwt_authn/mock.h +++ b/test/extensions/filters/http/jwt_authn/mock.h @@ -29,13 +29,16 @@ class MockAuthenticator : public Authenticator { public: MOCK_METHOD(void, doVerify, (Http::HeaderMap & headers, Tracing::Span& parent_span, - std::vector* tokens, SetPayloadCallback set_payload_cb, + std::vector* tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback)); void verify(Http::HeaderMap& headers, Tracing::Span& parent_span, - std::vector&& tokens, SetPayloadCallback set_payload_cb, + std::vector&& tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback) override { - doVerify(headers, parent_span, &tokens, std::move(set_payload_cb), std::move(callback)); + doVerify(headers, parent_span, &tokens, std::move(set_extracted_jwt_data_cb), + std::move(callback)); } MOCK_METHOD(void, onDestroy, ()); @@ -43,7 +46,7 @@ class MockAuthenticator : public Authenticator { class MockVerifierCallbacks : public Verifier::Callbacks { public: - MOCK_METHOD(void, setPayload, (const ProtobufWkt::Struct& payload)); + MOCK_METHOD(void, setExtractedData, (const ProtobufWkt::Struct& payload)); MOCK_METHOD(void, onComplete, (const Status& status)); }; diff --git a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc index bdf518ea2c7b8..31ac259c603a1 100644 --- a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc @@ -58,9 +58,41 @@ 
TEST_F(ProviderVerifierTest, TestOkJWT) { createVerifier(); MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { + EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); + })); + + EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); + + auto headers = Http::TestRequestHeaderMapImpl{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {"sec-istio-auth-userinfo", ""}, + }; + context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); + verifier_->verify(context_); + EXPECT_EQ(ExpectedPayloadValue, headers.get_("sec-istio-auth-userinfo")); +} + +// Test to set the payload (hence dynamic metadata) with the header and payload extracted from the +// verified JWT. +TEST_F(ProviderVerifierTest, TestOkJWTWithExtractedHeaderAndPayload) { + TestUtility::loadFromYaml(ExampleConfig, proto_config_); + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( + "my_payload"); + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_header_in_metadata( + "my_header"); + createVerifier(); + MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey); + + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { + // The expected payload is a merged struct of the extracted (from the JWT) payload and + // header data with "my_payload" and "my_header" as the keys. 
+ ProtobufWkt::Struct expected_payload; + MessageUtil::loadFromJson(ExpectedPayloadAndHeaderJSON, expected_payload); + EXPECT_TRUE(TestUtility::protoEqual(payload, expected_payload)); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); @@ -80,9 +112,10 @@ TEST_F(ProviderVerifierTest, TestSpanPassedDown) { createVerifier(); MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { + EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); diff --git a/test/extensions/filters/http/jwt_authn/test_common.h b/test/extensions/filters/http/jwt_authn/test_common.h index 13c083163eb14..114ee7e5ac65f 100644 --- a/test/extensions/filters/http/jwt_authn/test_common.h +++ b/test/extensions/filters/http/jwt_authn/test_common.h @@ -193,6 +193,28 @@ const char ExpectedPayloadJSON[] = R"( } )"; +const char ExpectedHeaderJSON[] = R"( +{ + "alg": "RS256", + "typ": "JWT" +} +)"; + +const char ExpectedPayloadAndHeaderJSON[] = R"( +{ + "my_payload":{ + "iss":"https://example.com", + "exp":2001001001, + "sub":"test@example.com", + "aud":"example_service" + }, + "my_header":{ + "typ":"JWT", + "alg":"RS256" + } +} +)"; + // Token copied from https://github.com/google/jwt_verify_lib/blob/master/src/verify_jwk_ec_test.cc // Use jwt.io to modify payload as: // { diff --git a/test/extensions/filters/http/lua/config_test.cc b/test/extensions/filters/http/lua/config_test.cc index 7cb6dbd44dbad..68582e11ae7e9 100644 --- a/test/extensions/filters/http/lua/config_test.cc +++ b/test/extensions/filters/http/lua/config_test.cc @@ -41,11 +41,12 @@ TEST(LuaFilterConfigTest, LuaFilterInJson) { cb(filter_callback); } -// Test 
that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(LuaFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.lua"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index a7ae58a4f2003..ce401a0c5cd72 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -41,7 +41,7 @@ class TestFilter : public Filter { public: using Filter::Filter; - MOCK_METHOD(void, scriptLog, (spdlog::level::level_enum level, const char* message)); + MOCK_METHOD(void, scriptLog, (spdlog::level::level_enum level, absl::string_view message)); }; class LuaHttpFilterTest : public testing::Test { @@ -818,6 +818,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { {":method", "POST"}, {":path", "/"}, {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, {"set-cookie", "variant=chewy; Path=/"}, {"content-length", "11"}}; @@ -841,7 +842,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { response_message->body().add(response, 8); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("8"))); - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("resp"))); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(std::string("resp\0nse", 8)))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("0"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("nse"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); @@ -1676,12 +1677,12 @@ TEST_F(LuaHttpFilterTest, GetMetadataFromHandle) { 
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } -// Test that the deprecated filter name works for metadata. +// Test that the deprecated filter is disabled by default for metadata. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST_F(LuaHttpFilterTest, DEPRECATED_FEATURE_TEST(GetMetadataFromHandleUsingDeprecatedName)) { const std::string SCRIPT{R"EOF( function envoy_on_request(request_handle) request_handle:logTrace(request_handle:metadata():get("foo.bar")["name"]) - request_handle:logTrace(request_handle:metadata():get("foo.bar")["prop"]) end )EOF"}; @@ -1690,7 +1691,6 @@ TEST_F(LuaHttpFilterTest, DEPRECATED_FEATURE_TEST(GetMetadataFromHandleUsingDepr envoy.lua: foo.bar: name: foo - prop: bar )EOF"}; InSequence s; @@ -1699,21 +1699,7 @@ TEST_F(LuaHttpFilterTest, DEPRECATED_FEATURE_TEST(GetMetadataFromHandleUsingDepr // Logs deprecation warning the first time. Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("foo"))); - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("bar"))); - EXPECT_LOG_CONTAINS( - "warn", - "Using deprecated http filter extension name 'envoy.lua' for 'envoy.filters.http.lua'", - filter_->decodeHeaders(request_headers, true)); - - // Doesn't log deprecation warning the second time. - setupMetadata(METADATA); - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("foo"))); - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("bar"))); - EXPECT_LOG_NOT_CONTAINS( - "warn", - "Using deprecated http filter extension name 'envoy.lua' for 'envoy.filters.http.lua'", - filter_->decodeHeaders(request_headers, true)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("foo"))).Times(0); } // No available metadata on route. 
@@ -2392,6 +2378,77 @@ TEST_F(LuaHttpFilterTest, LuaFilterSetResponseBufferChunked) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); } +// BodyBuffer should not truncated when bodyBuffer set hex character +TEST_F(LuaHttpFilterTest, LuaBodyBufferSetBytesWithHex) { + const std::string SCRIPT{R"EOF( + function envoy_on_response(response_handle) + local bodyBuffer = response_handle:body() + bodyBuffer:setBytes("\x471111") + local body_str = bodyBuffer:getBytes(0, bodyBuffer:length()) + response_handle:logTrace(body_str) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl response_body(""); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("G1111"))); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); + EXPECT_EQ(5, encoder_callbacks_.buffer_->length()); +} + +// BodyBuffer should not truncated when bodyBuffer set zero +TEST_F(LuaHttpFilterTest, LuaBodyBufferSetBytesWithZero) { + const std::string SCRIPT{R"EOF( + function envoy_on_response(response_handle) + local bodyBuffer = response_handle:body() + bodyBuffer:setBytes("\0") + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl response_body("1111"); + EXPECT_EQ(Http::FilterDataStatus::Continue, 
filter_->encodeData(response_body, true)); + EXPECT_EQ(1, encoder_callbacks_.buffer_->length()); +} + +// Script logging a table instead of the expected string. +TEST_F(LuaHttpFilterTest, LogTableInsteadOfString) { + const std::string LOG_TABLE{R"EOF( + function envoy_on_request(request_handle) + request_handle:logTrace({}) + end + )EOF"}; + + InSequence s; + setup(LOG_TABLE); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_CALL( + *filter_, + scriptLog( + spdlog::level::err, + StrEq("[string \"...\"]:3: bad argument #1 to 'logTrace' (string expected, got table)"))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + } // namespace } // namespace Lua } // namespace HttpFilters diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 35bd0a9267670..14734bb5389fd 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -274,6 +274,20 @@ name: lua request_handle:streamInfo():dynamicMetadata():set("envoy.lb", "foo", "bar") local dynamic_metadata_value = request_handle:streamInfo():dynamicMetadata():get("envoy.lb")["foo"] + local test_header_value_0 = request_handle:headers():getAtIndex("X-Test-Header", 0) + request_handle:headers():add("test_header_value_0", test_header_value_0) + local test_header_value_1 = request_handle:headers():getAtIndex("X-TEST-Header", 1) + request_handle:headers():add("test_header_value_1", test_header_value_1) + local test_header_value_2 = request_handle:headers():getAtIndex("x-test-header", 2) + if test_header_value_2 == nil then + request_handle:headers():add("test_header_value_2", "nil_value") + end + local test_header_value_size = request_handle:headers():getNumValues("x-test-header") + request_handle:headers():add("test_header_value_size", test_header_value_size) + request_handle:headers():add("cookie_0", 
request_handle:headers():getAtIndex("set-cookie", 0)) + request_handle:headers():add("cookie_1", request_handle:headers():getAtIndex("set-cookie", 1)) + request_handle:headers():add("cookie_size", request_handle:headers():getNumValues("set-cookie")) + request_handle:headers():add("request_body_size", body_length) request_handle:headers():add("request_metadata_foo", metadata["foo"]) request_handle:headers():add("request_metadata_baz", metadata["baz"]) @@ -305,11 +319,10 @@ name: lua initializeFilter(FILTER_AND_CODE); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); - Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}, - {"x-forwarded-for", "10.0.0.1"}}; + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-test-header", "foo"}, + {"x-test-header", "bar"}, {"set-cookie", "foo;bar;"}, {"set-cookie", "1,3;2,5;"}}; auto encoder_decoder = codec_client_->startRequest(request_headers); Http::StreamEncoder& encoder = encoder_decoder.first; @@ -320,6 +333,41 @@ name: lua encoder.encodeData(request_data2, true); waitForNextUpstreamRequest(); + EXPECT_EQ("foo", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_0"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("bar", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_1"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("nil_value", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_2"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("2", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_size"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("foo;bar;", upstream_request_->headers() + .get(Http::LowerCaseString("cookie_0"))[0] + ->value() + .getStringView()); + 
+ EXPECT_EQ("1,3;2,5;", upstream_request_->headers() + .get(Http::LowerCaseString("cookie_1"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("2", upstream_request_->headers() + .get(Http::LowerCaseString("cookie_size"))[0] + ->value() + .getStringView()); + EXPECT_EQ("10", upstream_request_->headers() .get(Http::LowerCaseString("request_body_size"))[0] ->value() diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 546073baa67ed..e4ffb8e8cda03 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -68,6 +68,57 @@ TEST_F(LuaHeaderMapWrapperTest, Methods) { start("callMe"); } +// Get the total number of values for a certain header with multiple values. +TEST_F(LuaHeaderMapWrapperTest, GetNumValues) { + const std::string SCRIPT{R"EOF( + function callMe(object) + testPrint(object:getNumValues("X-Test")) + testPrint(object:getNumValues(":path")) + testPrint(object:getNumValues("foobar")) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl headers{{":path", "/"}, {"x-test", "foo"}, {"x-test", "bar"}}; + HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); + EXPECT_CALL(printer_, testPrint("2")); + EXPECT_CALL(printer_, testPrint("1")); + EXPECT_CALL(printer_, testPrint("0")); + start("callMe"); +} + +// Get the value on a certain index for a header with multiple values. 
+TEST_F(LuaHeaderMapWrapperTest, GetAtIndex) { + const std::string SCRIPT{R"EOF( + function callMe(object) + if object:getAtIndex("x-test", -1) == nil then + testPrint("invalid_negative_index") + end + testPrint(object:getAtIndex("X-Test", 0)) + testPrint(object:getAtIndex("x-test", 1)) + testPrint(object:getAtIndex("x-test", 2)) + if object:getAtIndex("x-test", 3) == nil then + testPrint("nil_value") + end + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl headers{ + {":path", "/"}, {"x-test", "foo"}, {"x-test", "bar"}, {"x-test", ""}}; + HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); + EXPECT_CALL(printer_, testPrint("invalid_negative_index")); + EXPECT_CALL(printer_, testPrint("foo")); + EXPECT_CALL(printer_, testPrint("bar")); + EXPECT_CALL(printer_, testPrint("")); + EXPECT_CALL(printer_, testPrint("nil_value")); + start("callMe"); +} + // Test modifiable methods. TEST_F(LuaHeaderMapWrapperTest, ModifiableMethods) { const std::string SCRIPT{R"EOF( diff --git a/test/extensions/filters/http/oauth2/BUILD b/test/extensions/filters/http/oauth2/BUILD index 04bfb0b34f0f9..8a50c3b43d097 100644 --- a/test/extensions/filters/http/oauth2/BUILD +++ b/test/extensions/filters/http/oauth2/BUILD @@ -18,7 +18,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/oauth2:config", "//test/mocks/server:factory_context_mocks", - "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/oauth2/v3:pkg_cc_proto", ], ) @@ -49,7 +49,7 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/mocks/server:server_mocks", "//test/mocks/upstream:upstream_mocks", - "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/oauth2/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/oauth2/config_test.cc b/test/extensions/filters/http/oauth2/config_test.cc 
index 74dfe5daa6c42..dae71aa2e6038 100644 --- a/test/extensions/filters/http/oauth2/config_test.cc +++ b/test/extensions/filters/http/oauth2/config_test.cc @@ -1,7 +1,7 @@ #include #include -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/protobuf/utility.h" @@ -139,7 +139,7 @@ TEST(ConfigTest, InvalidHmacSecret) { TEST(ConfigTest, CreateFilterMissingConfig) { OAuth2Config config; - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2 proto_config; + envoy::extensions::filters::http::oauth2::v3::OAuth2 proto_config; NiceMock factory_context; EXPECT_THROW_WITH_MESSAGE( diff --git a/test/extensions/filters/http/oauth2/filter_test.cc b/test/extensions/filters/http/oauth2/filter_test.cc index d232adb224d24..129c0ec0fdfcc 100644 --- a/test/extensions/filters/http/oauth2/filter_test.cc +++ b/test/extensions/filters/http/oauth2/filter_test.cc @@ -1,8 +1,8 @@ #include #include -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.validate.h" #include "envoy/http/async_client.h" #include "envoy/http/message.h" @@ -96,7 +96,7 @@ class OAuth2Test : public testing::Test { // Set up proto fields with standard config. 
FilterConfigSharedPtr getConfig() { - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config p; + envoy::extensions::filters::http::oauth2::v3::OAuth2Config p; auto* endpoint = p.mutable_token_endpoint(); endpoint->set_cluster("auth.example.com"); endpoint->set_uri("auth.example.com/_oauth"); @@ -170,7 +170,7 @@ TEST_F(OAuth2Test, SdsDynamicGenericSecret) { NiceMock dispatcher; EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(init_manager, add(_)) @@ -245,7 +245,7 @@ TEST_F(OAuth2Test, InvalidCluster) { TEST_F(OAuth2Test, DefaultAuthScope) { // Set up proto fields with no auth scope set. 
- envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config p; + envoy::extensions::filters::http::oauth2::v3::OAuth2Config p; auto* endpoint = p.mutable_token_endpoint(); endpoint->set_cluster("auth.example.com"); endpoint->set_uri("auth.example.com/_oauth"); diff --git a/test/extensions/filters/http/oauth2/oauth_integration_test.cc b/test/extensions/filters/http/oauth2/oauth_integration_test.cc index cf675c65f6433..721586ed18331 100644 --- a/test/extensions/filters/http/oauth2/oauth_integration_test.cc +++ b/test/extensions/filters/http/oauth2/oauth_integration_test.cc @@ -77,7 +77,7 @@ class OauthIntegrationTest : public testing::Test, public HttpIntegrationTest { config_helper_.prependFilter(TestEnvironment::substitute(R"EOF( name: oauth typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 config: token_endpoint: cluster: oauth diff --git a/test/extensions/filters/http/original_src/original_src_test.cc b/test/extensions/filters/http/original_src/original_src_test.cc index 5839baa88b9e5..a26db42297c79 100644 --- a/test/extensions/filters/http/original_src/original_src_test.cc +++ b/test/extensions/filters/http/original_src/original_src_test.cc @@ -110,9 +110,17 @@ TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressUsesCorrectAddress) { option->hashKey(key); } - std::vector expected_key = {1, 2, 3, 4}; - - EXPECT_EQ(key, expected_key); + // The first part of the hash is the address. Then come the other options. On Windows there are + // is only the single option. On other platforms there are more that get hashed. 
+ EXPECT_EQ(key[0], 1); + EXPECT_EQ(key[1], 2); + EXPECT_EQ(key[2], 3); + EXPECT_EQ(key[3], 4); +#ifndef WIN32 + EXPECT_GT(key.size(), 4); +#else + EXPECT_EQ(key.size(), 4); +#endif } TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressBleachesPort) { diff --git a/test/extensions/filters/http/ratelimit/config_test.cc b/test/extensions/filters/http/ratelimit/config_test.cc index 58d39fd41c265..feeebbf19dd5f 100644 --- a/test/extensions/filters/http/ratelimit/config_test.cc +++ b/test/extensions/filters/http/ratelimit/config_test.cc @@ -81,11 +81,12 @@ TEST(RateLimitFilterConfigTest, BadRateLimitFilterConfig) { "route_key: Cannot find field"); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(RateLimitFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.rate_limit"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/rbac/BUILD b/test/extensions/filters/http/rbac/BUILD index e37cc1971f608..0e2cf13fc8cc7 100644 --- a/test/extensions/filters/http/rbac/BUILD +++ b/test/extensions/filters/http/rbac/BUILD @@ -38,6 +38,7 @@ envoy_extension_cc_test( "//test/mocks/network:network_mocks", "@envoy_api//envoy/config/rbac/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/rbac/matchers/upstream_ip_port/v3:pkg_cc_proto", ], ) @@ -46,7 +47,10 @@ envoy_extension_cc_test( srcs = ["rbac_filter_integration_test.cc"], extension_names = ["envoy.filters.http.rbac"], deps = [ + "//source/extensions/clusters/dynamic_forward_proxy:cluster", + "//source/extensions/filters/http/dynamic_forward_proxy:config", "//source/extensions/filters/http/rbac:config", + 
"//source/extensions/key_value/file_based:config_lib", "//test/config:utility_lib", "//test/integration:http_protocol_integration_lib", "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/rbac/mocks.h b/test/extensions/filters/http/rbac/mocks.h index 7932a02fea4dc..3a079fc5758d6 100644 --- a/test/extensions/filters/http/rbac/mocks.h +++ b/test/extensions/filters/http/rbac/mocks.h @@ -2,6 +2,7 @@ #include "envoy/extensions/filters/http/rbac/v3/rbac.pb.h" +#include "source/common/protobuf/message_validator_impl.h" #include "source/extensions/filters/common/rbac/utility.h" #include "source/extensions/filters/http/rbac/rbac_filter.h" @@ -18,7 +19,8 @@ class MockRoleBasedAccessControlRouteSpecificFilterConfig public: MockRoleBasedAccessControlRouteSpecificFilterConfig( const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& r) - : RoleBasedAccessControlRouteSpecificFilterConfig(r){}; + : RoleBasedAccessControlRouteSpecificFilterConfig( + r, ProtobufMessage::getStrictValidationVisitor()){}; MOCK_METHOD(Filters::Common::RBAC::RoleBasedAccessControlEngineImpl&, engine, (), (const)); }; diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index 242ad7684071d..8894afc1303a9 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -540,5 +540,186 @@ TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderMatch) { EXPECT_EQ("200", response->headers().getStatusValue()); } +// Helper for integration testing of RBAC filter with dynamic forward proxy. 
+class RbacDynamicForwardProxyIntegrationHelper + : public testing::TestWithParam, + public Event::TestUsingSimulatedTime, + public HttpIntegrationTest { +public: + RbacDynamicForwardProxyIntegrationHelper() + : HttpIntegrationTest(Http::CodecType::HTTP1, GetParam()) {} + + void initializeWithFilterConfigs(bool save_filter_state, const std::string& rbac_config) { + setUpstreamProtocol(Http::CodecType::HTTP1); + + const std::string save_upstream_config = + save_filter_state ? "save_upstream_address: true " : ""; + const std::string dfp_config = + fmt::format(R"EOF( +name: dynamic_forward_proxy +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig + {} + dns_cache_config: + name: foo + dns_lookup_family: {} +)EOF", + save_upstream_config, Network::Test::ipVersionToDnsFamily(GetParam())); + + config_helper_.prependFilter(rbac_config); + + config_helper_.prependFilter(dfp_config); + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Switch predefined cluster_0 to CDS filesystem sourcing. + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_resource_api_version( + envoy::config::core::v3::ApiVersion::V3); + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); + bootstrap.mutable_static_resources()->clear_clusters(); + }); + + // Set validate_clusters to false to allow us to reference a CDS cluster. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); }); + + // Setup the initial CDS cluster. 
+ cluster_.mutable_connect_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + cluster_.set_name("cluster_0"); + cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED); + + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); + protocol_options.mutable_upstream_http_protocol_options()->set_auto_san_validation(true); + protocol_options.mutable_explicit_http_config()->mutable_http_protocol_options(); + ConfigHelper::setProtocolOptions(cluster_, protocol_options); + + const std::string cluster_type_config = fmt::format( + R"EOF( +name: envoy.clusters.dynamic_forward_proxy +typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + dns_cache_config: + name: foo + dns_lookup_family: {} +)EOF", + Network::Test::ipVersionToDnsFamily(GetParam())); + + TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); + // Load the CDS cluster and wait for it to initialize. + cds_helper_.setCds({cluster_}); + HttpIntegrationTest::initialize(); + test_server_->waitForCounterEq("cluster_manager.cluster_added", 1); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + } + + CdsHelper cds_helper_; + envoy::config::cluster::v3::Cluster cluster_; + bool write_cache_file_{}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, RbacDynamicForwardProxyIntegrationHelper, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Verify that if upstream ip matcher is configured, upstream address is saved by a filter(dynamic +// forward proxy in this case). If not saved, the request would be denied. 
+TEST_P(RbacDynamicForwardProxyIntegrationHelper, AllowIpWithNoFilterState) { + const std::string rbac_config = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + policies: + foo: + permissions: + - or_rules: + rules: + - matcher: + name: envoy.filters.http.rbac.matchers.upstream_ip_port + typed_config: + "@type": type.googleapis.com/envoy.extensions.rbac.matchers.upstream_ip_port.v3.UpstreamIpPortMatcher + upstream_ip: + address_prefix: 127.0.0.1 + prefix_len: 24 + principals: + - any: true +)EOF"; + + initializeWithFilterConfigs(false, rbac_config); + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} + +// Verify that if upstream ip matcher is configured and upstream address is saved by dynamic +// forward proxy, then RBAC policy is evaluated correctly for `or_rules`. +#ifndef WIN32 +// TODO(conqerAtapple) figure out why this test doesn't pass on windows. 
+TEST_P(RbacDynamicForwardProxyIntegrationHelper, DenyIpOrPortWithFilterState) { + const std::string rbac_config = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + foo: + permissions: + - or_rules: + rules: + - matcher: + name: envoy.filters.http.rbac.matchers.upstream_ip_port + typed_config: + "@type": type.googleapis.com/envoy.extensions.rbac.matchers.upstream_ip_port.v3.UpstreamIpPortMatcher + upstream_ip: + address_prefix: 127.2.1.1 + prefix_len: 24 + - matcher: + name: envoy.filters.http.rbac.matchers.upstream_ip_port + typed_config: + "@type": type.googleapis.com/envoy.extensions.rbac.matchers.upstream_ip_port.v3.UpstreamIpPortMatcher + upstream_ip: + address_prefix: 127.0.0.1 + prefix_len: 24 + - matcher: + name: envoy.filters.http.rbac.matchers.upstream_ip_port + typed_config: + "@type": type.googleapis.com/envoy.extensions.rbac.matchers.upstream_ip_port.v3.UpstreamIpPortMatcher + upstream_ip: + address_prefix: ::1 + prefix_len: 24 + principals: + - any: true +)EOF"; + + initializeWithFilterConfigs(true, rbac_config); + + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} +#endif + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index 4d50a70421da0..9b2dc930d71a6 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -1,8 
+1,10 @@ #include "envoy/config/rbac/v3/rbac.pb.h" #include "envoy/extensions/filters/http/rbac/v3/rbac.pb.h" +#include "envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.pb.h" #include "source/common/config/metadata.h" #include "source/common/network/utility.h" +#include "source/common/stream_info/upstream_address.h" #include "source/extensions/filters/common/rbac/utility.h" #include "source/extensions/filters/http/rbac/rbac_filter.h" @@ -49,18 +51,30 @@ class RoleBasedAccessControlFilterTest : public testing::Test { (*config.mutable_shadow_rules()->mutable_policies())["bar"] = shadow_policy; config.set_shadow_rules_stat_prefix("prefix_"); - return std::make_shared(config, "test", store_); + return std::make_shared( + config, "test", store_, ProtobufMessage::getStrictValidationVisitor()); } RoleBasedAccessControlFilterTest() : config_(setupConfig(envoy::config::rbac::v3::RBAC::ALLOW)), filter_(config_) {} void SetUp() override { + config_ = setupConfig(envoy::config::rbac::v3::RBAC::ALLOW); + filter_ = RoleBasedAccessControlFilter(config_); + EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_)); EXPECT_CALL(callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); filter_.setDecoderFilterCallbacks(callbacks_); } + void SetUp(RoleBasedAccessControlFilterConfigSharedPtr config) { + config_ = config; + filter_ = RoleBasedAccessControlFilter(config_); + + EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_)); + EXPECT_CALL(callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + filter_.setDecoderFilterCallbacks(callbacks_); + } void setDestinationPort(uint16_t port) { address_ = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", port, false); req_info_.downstream_connection_info_provider_->setLocalAddress(address_); @@ -266,6 +280,321 @@ TEST_F(RoleBasedAccessControlFilterTest, ShouldNotLog) { checkAccessLogMetadata(LogResult::No); } +// Upstream Ip and Port matcher tests. 
+class UpstreamIpPortMatcherTests : public RoleBasedAccessControlFilterTest { +public: + struct UpstreamIpPortMatcherConfig { + UpstreamIpPortMatcherConfig() = default; + + UpstreamIpPortMatcherConfig(const std::string& ip) : ip_(ip) {} + + UpstreamIpPortMatcherConfig(uint16_t start, uint16_t end) { + envoy::type::v3::Int64Range port_range; + port_range.set_start(start); + port_range.set_end(end); + port_range_ = port_range; + } + + UpstreamIpPortMatcherConfig(const std::string& ip, uint16_t start, uint16_t end) : ip_(ip) { + envoy::type::v3::Int64Range port_range; + port_range.set_start(start); + port_range.set_end(end); + port_range_ = port_range; + } + + absl::optional ip_; + absl::optional port_range_; + }; + + void upstreamIpTestsBasicPolicySetup(const std::vector& configs, + const envoy::config::rbac::v3::RBAC::Action& action) { + envoy::config::rbac::v3::Policy policy; + + auto policy_rules = policy.add_permissions()->mutable_or_rules(); + policy_rules->add_rules()->mutable_requested_server_name()->MergeFrom( + TestUtility::createRegexMatcher(".*cncf.io")); + + for (const auto& config : configs) { + envoy::extensions::rbac::matchers::upstream_ip_port::v3::UpstreamIpPortMatcher matcher; + + if (config.ip_) { + matcher.mutable_upstream_ip()->set_address_prefix(*config.ip_); + matcher.mutable_upstream_ip()->mutable_prefix_len()->set_value(32); + } + + if (config.port_range_) { + *matcher.mutable_upstream_port_range() = config.port_range_.value(); + } + + auto* matcher_ext_config = policy_rules->add_rules()->mutable_matcher(); + + *matcher_ext_config->mutable_name() = "envoy.rbac.matchers.upstream.upstream_ip_port"; + + matcher_ext_config->mutable_typed_config()->PackFrom(matcher); + } + + policy.add_principals()->set_any(true); + + envoy::extensions::filters::http::rbac::v3::RBAC config; + config.mutable_rules()->set_action(action); + (*config.mutable_rules()->mutable_policies())["foo"] = policy; + + auto config_ptr = std::make_shared( + config, "test", store_, 
ProtobufMessage::getStrictValidationVisitor()); + + // Setup test with the policy config. + SetUp(config_ptr); + } + + void upstreamIpTestsFilterStateSetup(NiceMock& callback, + const std::vector& upstream_ips) { + auto address_obj = std::make_unique(); + + for (const auto& ip : upstream_ips) { + Network::Address::InstanceConstSharedPtr address = + Envoy::Network::Utility::parseInternetAddressAndPort(ip, false); + + address_obj->address_ = address; + } + + // Set the filter state data. + callback.streamInfo().filterState()->setData( + StreamInfo::UpstreamAddress::key(), std::move(address_obj), + StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Request); + } +}; + +// Tests simple permission policy with no upstream ip metadata in the filter state. +TEST_F(UpstreamIpPortMatcherTests, UpstreamIpNoFilterStateMetadata) { + const std::vector configs = { + {"1.2.3.4"}, + }; + // Setup policy config. + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::ALLOW); + + // Filter iteration should be stopped as there is no filter state metadata. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, false)); + + // Expect `denied` stats to be incremented. + EXPECT_EQ(1U, config_->stats().denied_.value()); +} + +// Tests simple upstream_ip ALLOW permission policy with ONLY upstream ip metadata in the filter +// state. +TEST_F(UpstreamIpPortMatcherTests, UpstreamIpWithFilterStateAllow) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + // Setup policy config. + const std::vector configs = { + {"1.2.3.4"}, + }; + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::ALLOW); + + // Filter iteration should continue since the policy is ALLOW. + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + + // Expect `allowed` stats to be incremented. 
+ EXPECT_EQ(1U, config_->stats().allowed_.value()); +} + +// Tests simple upstream_ip DENY permission policy with ONLY upstream ip metadata in the filter +// state. +TEST_F(UpstreamIpPortMatcherTests, UpstreamIpWithFilterStateDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + // Setup policy config. + const std::vector configs = { + {"1.2.3.4"}, + }; + + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + // Filter iteration should stop since the policy is DENY. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, false)); + + // Expect `denied` stats to be incremented. + EXPECT_EQ(1U, config_->stats().denied_.value()); +} + +// Tests simple upstream_ip DENY permission policy with BOTH upstream ip and port matching the +// policy. +TEST_F(UpstreamIpPortMatcherTests, UpstreamIpPortMatchDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + const std::vector configs = { + {"1.2.3.4", 120, 123}, + }; + + // Setup policy config. + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + // Filter iteration should stop since the policy is DENY. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, false)); + + // Expect `denied` stats to be incremented. + EXPECT_EQ(1U, config_->stats().denied_.value()); +} + +TEST_F(UpstreamIpPortMatcherTests, UpstreamIpPortMatchAllow) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + const std::vector configs = { + {"1.2.3.4", 120, 123}, + }; + + // Setup policy config. + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::ALLOW); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + + // Expect `allowed` stats to be incremented. 
+ EXPECT_EQ(1U, config_->stats().allowed_.value()); +} + +TEST_F(UpstreamIpPortMatcherTests, UpstreamPortMatchDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + const std::vector configs = { + {120, 123}, + }; + + // Setup policy config. + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + // Filter iteration should stop since the policy is DENY. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, false)); + + // Expect `denied` stats to be incremented. + EXPECT_EQ(1U, config_->stats().denied_.value()); +} + +TEST_F(UpstreamIpPortMatcherTests, UpstreamPortMatchAllow) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + const std::vector configs = { + {120, 123}, + }; + + // Setup policy config. + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::ALLOW); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + + // Expect `allowed` stats to be incremented. + EXPECT_EQ(1U, config_->stats().allowed_.value()); +} + +// Tests upstream_ip DENY permission policy with multiple upstream ips to match in the policy. +// If any of the configured upstream ip addresses match the metadata, the policy is enforced (DENY). +TEST_F(UpstreamIpPortMatcherTests, MultiUpstreamIpsAnyPolicyDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + // Setup policy config. + const std::vector configs = { + {"1.1.1.2"}, {"1.2.3.4", 120, 123}, {"1.2.3.5"}}; + + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + // Filter iteration should stop since the policy is DENY. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, false)); + + // Expect `denied` stats to be incremented. 
+ EXPECT_EQ(1U, config_->stats().denied_.value()); +} + +// Tests upstream_ip DENY permission policy with multiple upstream ips to match in the policy. +// If ONLY port is configured in the policy, a match should enforce the policy. +TEST_F(UpstreamIpPortMatcherTests, MultiUpstreamIpsNoIpMatchPortMatchDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"2.2.3.4:123"}); + + // Setup policy config. + const std::vector configs = {{"1.1.1.2"}, {120, 123}, {"1.2.3.5"}}; + + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + // Filter iteration should stop since the policy is DENY. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, false)); + + // Expect `denied` stats to be incremented. + EXPECT_EQ(1U, config_->stats().denied_.value()); +} + +// Tests upstream_ip DENY permission policy with multiple upstream ips to match in the policy. +// If NONE of the configured upstream ip addresses or port match the metadata, the policy is NOT +// enforced. +TEST_F(UpstreamIpPortMatcherTests, MultiUpstreamIpsNoIpMatchNoPortMatchDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"2.2.3.4:123"}); + + // Setup policy config. + const std::vector configs = {{"1.1.1.2"}, {124, 125}, {"1.2.3.5"}}; + + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + + // Expect `allowed` stats to be incremented. + EXPECT_EQ(1U, config_->stats().allowed_.value()); +} + +// Tests upstream_ip DENY permission policy with multiple upstream ips to match in the policy. +// If NONE of the configured upstream ip addresses or port match the metadata, the policy is NOT +// enforced. +TEST_F(UpstreamIpPortMatcherTests, MultiUpstreamIpsAnyPolicyNoMatchDeny) { + // Setup filter state with the upstream address. 
+ upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + // Setup policy config. + const std::vector configs = { + {"1.1.1.2"}, {"1.2.3.4", 124, 125}, {"1.2.3.5"}}; + + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + + // Expect `allowed` stats to be incremented. + EXPECT_EQ(1U, config_->stats().allowed_.value()); +} + +// Tests simple DENY permission policy with misconfigured port range. +TEST_F(UpstreamIpPortMatcherTests, UpstreamPortBadRangeDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:8080"}); + + const std::vector configs = { + {8080, 0}, + }; + + // Setup policy config. + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + + EXPECT_EQ(0, config_->stats().denied_.value()); +} + +// Verifies that if no IP or port is configured, EnvoyException is thrown. +TEST_F(UpstreamIpPortMatcherTests, EmptyUpstreamConfigPolicyDeny) { + // Setup filter state with the upstream address. + upstreamIpTestsFilterStateSetup(callbacks_, {"1.2.3.4:123"}); + + // Setup policy config. 
+ const std::vector configs = {{}}; + + EXPECT_THROW_WITH_MESSAGE( + upstreamIpTestsBasicPolicySetup(configs, envoy::config::rbac::v3::RBAC::DENY), EnvoyException, + "Invalid UpstreamIpPortMatcher configuration - missing `upstream_ip` " + "and/or `upstream_port_range`"); +} + } // namespace } // namespace RBACFilter } // namespace HttpFilters diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 25328614681fe..4c8307125d1a7 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -18,24 +18,29 @@ class AutoSniIntegrationTest : public testing::TestWithParammutable_clusters()->at(0); - ConfigHelper::HttpProtocolOptions protocol_options; - protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); - ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), - protocol_options); - - envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; - auto* validation_context = - tls_context.mutable_common_tls_context()->mutable_validation_context(); - validation_context->mutable_trusted_ca()->set_filename( - TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); - cluster_config.mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); - cluster_config.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); - }); + config_helper_.addConfigModifier( + [override_auto_sni_header](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto& cluster_config = bootstrap.mutable_static_resources()->mutable_clusters()->at(0); + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); + if (!override_auto_sni_header.empty()) { + 
protocol_options.mutable_upstream_http_protocol_options()->set_override_auto_sni_header( + override_auto_sni_header); + } + ConfigHelper::setProtocolOptions( + *bootstrap.mutable_static_resources()->mutable_clusters(0), protocol_options); + + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + auto* validation_context = + tls_context.mutable_common_tls_context()->mutable_validation_context(); + validation_context->mutable_trusted_ca()->set_filename( + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); + cluster_config.mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); + cluster_config.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); + }); HttpIntegrationTest::initialize(); } @@ -83,6 +88,26 @@ TEST_P(AutoSniIntegrationTest, BasicAutoSniTest) { EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } +TEST_P(AutoSniIntegrationTest, AutoSniWithAltHeaderNameTest) { + setup("x-host"); + codec_client_ = makeHttpConnection(lookupPort("http")); + const auto response_ = + sendRequestAndWaitForResponse(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "localhost"}, + {"x-host", "custom"}}, + 0, default_response_headers_, 0); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response_->complete()); + + const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket = + dynamic_cast( + fake_upstream_connection_->connection().ssl().get()); + EXPECT_STREQ("custom", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); +} + TEST_P(AutoSniIntegrationTest, PassingNotDNS) { setup(); codec_client_ = makeHttpConnection(lookupPort("http")); diff --git a/test/extensions/filters/http/router/config_test.cc b/test/extensions/filters/http/router/config_test.cc index 65d3f85544e72..912f2f912822f 100644 --- a/test/extensions/filters/http/router/config_test.cc 
+++ b/test/extensions/filters/http/router/config_test.cc @@ -86,11 +86,12 @@ TEST(RouterFilterConfigTest, RouterFilterWithEmptyProtoConfig) { cb(filter_callback); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(RouterFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.router"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/http/wasm/BUILD b/test/extensions/filters/http/wasm/BUILD index dffa959f0a6ff..59e7d75b33522 100644 --- a/test/extensions/filters/http/wasm/BUILD +++ b/test/extensions/filters/http/wasm/BUILD @@ -63,7 +63,6 @@ envoy_extension_cc_test( "//source/common/common:hex_lib", "//source/common/crypto:utility_lib", "//source/common/http:message_lib", - "//source/extensions/common/crypto:utility_lib", "//source/extensions/common/wasm:wasm_lib", "//source/extensions/filters/http/wasm:config", "//test/extensions/common/wasm:wasm_runtime", diff --git a/test/extensions/filters/http/wasm/config_test.cc b/test/extensions/filters/http/wasm/config_test.cc index 5f6312fa02e20..8e3305fc51033 100644 --- a/test/extensions/filters/http/wasm/config_test.cc +++ b/test/extensions/filters/http/wasm/config_test.cc @@ -38,7 +38,7 @@ class WasmFilterConfigTest : public Event::TestUsingSimulatedTime, ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_)); EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager_)); ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(context_, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); } void SetUp() override { 
Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); } @@ -151,6 +151,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasm) { // Check if the context still holds a valid Wasm even after the factory is destroyed. EXPECT_TRUE(context); EXPECT_TRUE(context->wasm()); + // Check if the custom stat namespace is registered during the initialization. + EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); } TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasmFailOpenOk) { diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc index a515204a56320..97e71eb118447 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc @@ -45,11 +45,12 @@ TEST(HttpInspectorConfigFactoryTest, TestCreateFactory) { EXPECT_NE(dynamic_cast(added_filter.get()), nullptr); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(HttpInspectorConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.http_inspector"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry< Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name)); diff --git a/test/extensions/filters/listener/original_dst/config_test.cc b/test/extensions/filters/listener/original_dst/config_test.cc index 0800e080ac33d..167303d1ca40b 100644 --- a/test/extensions/filters/listener/original_dst/config_test.cc +++ b/test/extensions/filters/listener/original_dst/config_test.cc @@ -11,11 +11,12 @@ namespace ListenerFilters { namespace OriginalDst { namespace { -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(OriginalDstConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.original_dst"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry< Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name)); diff --git a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc index a2febfc6f7f3d..5369367dba95b 100644 --- a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc @@ -43,11 +43,12 @@ TEST(OriginalSrcConfigFactoryTest, TestCreateFactory) { EXPECT_NE(dynamic_cast(added_filter.get()), nullptr); } -// Test that the deprecated extension name still functions. 
+// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(OriginalSrcConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.original_src"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry< Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name)); diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 9b10643009dd6..763b774fe4386 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -1483,11 +1483,12 @@ TEST(ProxyProtocolConfigFactoryTest, TestCreateFactory) { EXPECT_NE(dynamic_cast(added_filter.get()), nullptr); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(ProxyProtocolConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.proxy_protocol"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry< Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name)); diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 6e119c87dcd92..902855a7b4c8f 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -282,11 +282,12 @@ TEST_P(TlsInspectorTest, InlineReadSucceed) { EXPECT_EQ(Network::FilterStatus::Continue, filter_->onAccept(cb_)); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(TlsInspectorConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.tls_inspector"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry< Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name)); diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index a301eee1665e2..d58c361e40ac1 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -163,6 +163,10 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { std::make_shared("192.168.1.1")); std::string expected_sha_1("digest"); EXPECT_CALL(*ssl_, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha_1)); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError)); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails("auth_digest_no_match")); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::Connected); diff --git a/test/extensions/filters/network/client_ssl_auth/config_test.cc b/test/extensions/filters/network/client_ssl_auth/config_test.cc index 7c03c47be47b0..81702113a1797 100644 --- a/test/extensions/filters/network/client_ssl_auth/config_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/config_test.cc @@ -101,11 +101,12 @@ TEST(ClientSslAuthConfigFactoryTest, ValidateFail) { ProtoValidationException); } -// Test that the deprecated extension name still functions. 
+// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(ClientSslAuthConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.client_ssl_auth"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 index 21ad6d880835a..3fe10beb50e55 100644 --- a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 @@ -1,7 +1,7 @@ config { name: "envoy.filters.network.sni_dynamic_forward_proxy" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig" + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig" value: "\nP\nFenvoy.network.sni_dynamic_fo.filters.network.sni_dynamic_forward_proxy*\006\010\200\200\200\260\002" } } diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc index ca021727c2d17..cb906f81c2680 100644 --- a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -127,7 +127,7 @@ void UberFilterFuzzer::fuzz( case test::extensions::filters::network::Action::kAdvanceTime: { time_source_.advanceTimeAndRun( std::chrono::milliseconds(action.advance_time().milliseconds()), - factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock); + factory_context_.mainThreadDispatcher(), 
Event::Dispatcher::RunType::NonBlock); break; } default: { diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc index 6772fe995e22c..941d5b121acc9 100644 --- a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc @@ -103,7 +103,7 @@ void UberWriteFilterFuzzer::fuzz( case test::extensions::filters::network::WriteAction::kAdvanceTime: { time_source_.advanceTimeAndRun( std::chrono::milliseconds(action.advance_time().milliseconds()), - factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock); + factory_context_.mainThreadDispatcher(), Event::Dispatcher::RunType::NonBlock); break; } default: { diff --git a/test/extensions/filters/network/common/fuzz/utils/fakes.h b/test/extensions/filters/network/common/fuzz/utils/fakes.h index e9edf5bb5bba0..44193a979a9e0 100644 --- a/test/extensions/filters/network/common/fuzz/utils/fakes.h +++ b/test/extensions/filters/network/common/fuzz/utils/fakes.h @@ -13,7 +13,7 @@ class FakeFactoryContext : public MockFactoryContext { } AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; } Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } - Event::Dispatcher& dispatcher() override { return *dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return *dispatcher_; } const Network::DrainDecision& drainDecision() override { return drain_manager_; } Init::Manager& initManager() override { return init_manager_; } ServerLifecycleNotifier& lifecycleNotifier() override { return lifecycle_notifier_; } diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 56c60e93bb3f9..8cdbe6162927f 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ 
b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -120,7 +120,7 @@ class ConnectionManagerTest : public testing::Test { filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); } - TimeSource& timeSystem() { return factory_context_.dispatcher().timeSource(); } + TimeSource& timeSystem() { return factory_context_.mainThreadDispatcher().timeSource(); } void initializeFilter() { initializeFilter(""); } diff --git a/test/extensions/filters/network/ext_authz/config_test.cc b/test/extensions/filters/network/ext_authz/config_test.cc index 2d26fc050621c..518162f43026e 100644 --- a/test/extensions/filters/network/ext_authz/config_test.cc +++ b/test/extensions/filters/network/ext_authz/config_test.cc @@ -64,11 +64,12 @@ TEST(ExtAuthzFilterConfigTest, ValidateFail) { TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { expectCorrectProto(); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(ExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.ext_authz"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 1681eea7db600..18c1963ceacc0 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1707,11 +1707,12 @@ stat_prefix: my_stat_prefix R"(inconsistent HTTP/2 custom SETTINGS parameter\(s\) detected; identifiers = \{0x0a\})"); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.http_connection_manager"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/http_connection_manager/config_test_base.h b/test/extensions/filters/network/http_connection_manager/config_test_base.h index 995cb9842e146..9241da2bff892 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test_base.h +++ b/test/extensions/filters/network/http_connection_manager/config_test_base.h @@ -34,7 +34,7 @@ parseHttpConnectionManagerFromYaml(const std::string& yaml) { class HttpConnectionManagerConfigTest : public testing::Test { public: NiceMock context_; - Http::SlowDateProviderImpl date_provider_{context_.dispatcher().timeSource()}; + Http::SlowDateProviderImpl date_provider_{context_.mainThreadDispatcher().timeSource()}; NiceMock route_config_provider_manager_; NiceMock scoped_routes_config_provider_manager_; NiceMock http_tracer_manager_; diff --git a/test/extensions/filters/network/mongo_proxy/config_test.cc b/test/extensions/filters/network/mongo_proxy/config_test.cc index 58b289a9176f2..0548d72ca7213 100644 --- a/test/extensions/filters/network/mongo_proxy/config_test.cc +++ b/test/extensions/filters/network/mongo_proxy/config_test.cc @@ -226,11 +226,12 @@ TEST(MongoFilterConfigTest, CorrectFaultConfigurationInProto) { cb(connection); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(MongoFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.mongo_proxy"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/ratelimit/BUILD b/test/extensions/filters/network/ratelimit/BUILD index 9ea2d27737a2a..ea09ac87dbb32 100644 --- a/test/extensions/filters/network/ratelimit/BUILD +++ b/test/extensions/filters/network/ratelimit/BUILD @@ -14,20 +14,26 @@ envoy_package() envoy_extension_cc_test( name = "ratelimit_test", srcs = ["ratelimit_test.cc"], - extension_names = ["envoy.filters.network.ratelimit"], + extension_names = [ + "envoy.filters.network.ratelimit", + "envoy.filters.network.tcp_proxy", + ], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_lib", "//source/common/stats:stats_lib", + "//source/common/tcp_proxy", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/ratelimit:ratelimit_lib", "//test/extensions/filters/common/ratelimit:ratelimit_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/tracing:tracing_mocks", "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/ratelimit/config_test.cc b/test/extensions/filters/network/ratelimit/config_test.cc index 163ad4e52946a..c92b14ca2763b 100644 --- a/test/extensions/filters/network/ratelimit/config_test.cc +++ b/test/extensions/filters/network/ratelimit/config_test.cc @@ -88,11 +88,12 @@ ip_allowlist: '12' "ip_allowlist: Cannot find field"); } -// Test that 
the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(RateLimitFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.ratelimit"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index 3be03981d4899..8b3f702a6b81c 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -3,9 +3,12 @@ #include #include "envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h" +#include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h" #include "envoy/stats/stats.h" #include "source/common/buffer/buffer_impl.h" +#include "source/common/network/filter_manager_impl.h" +#include "source/common/tcp_proxy/tcp_proxy.h" #include "source/extensions/filters/network/ratelimit/ratelimit.h" #include "source/extensions/filters/network/well_known_names.h" @@ -13,6 +16,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/printers.h" @@ -358,6 +362,96 @@ TEST_F(RateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { EXPECT_EQ(0U, stats_store_.counter("ratelimit.name.failure_mode_allowed").value()); } +class NetworkFilterManagerRateLimitTest : public testing::Test { +public: + void SetUp() override { + EXPECT_CALL(connection_, getReadBuffer).WillRepeatedly(Invoke([this]() { + return Network::StreamBuffer{read_buffer_, read_end_stream_}; + })); + EXPECT_CALL(connection_, 
getWriteBuffer).WillRepeatedly(Invoke([this]() { + return Network::StreamBuffer{write_buffer_, write_end_stream_}; + })); + } + + NiceMock connection_; + NiceMock socket_; + + Buffer::OwnedImpl read_buffer_; + Buffer::OwnedImpl write_buffer_; + bool read_end_stream_{}; + bool write_end_stream_{}; +}; + +// This is a very important flow so make sure it works correctly in aggregate. +TEST_F(NetworkFilterManagerRateLimitTest, RateLimitAndTcpProxy) { + InSequence s; + NiceMock factory_context; + NiceMock upstream_connection; + NiceMock conn_pool; + Network::FilterManagerImpl manager(connection_, socket_); + + std::string rl_yaml = R"EOF( +domain: foo +descriptors: +- entries: + - key: hello + value: world +stat_prefix: name + )EOF"; + + ON_CALL(factory_context.runtime_loader_.snapshot_, + featureEnabled("ratelimit.tcp_filter_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context.runtime_loader_.snapshot_, + featureEnabled("ratelimit.tcp_filter_enforcing", 100)) + .WillByDefault(Return(true)); + + envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; + TestUtility::loadFromYaml(rl_yaml, proto_config); + + Extensions::NetworkFilters::RateLimitFilter::ConfigSharedPtr rl_config( + new Extensions::NetworkFilters::RateLimitFilter::Config(proto_config, factory_context.scope_, + factory_context.runtime_loader_)); + Extensions::Filters::Common::RateLimit::MockClient* rl_client = + new Extensions::Filters::Common::RateLimit::MockClient(); + manager.addReadFilter(std::make_shared( + rl_config, Extensions::Filters::Common::RateLimit::ClientPtr{rl_client})); + + factory_context.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); + envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; + tcp_proxy.set_stat_prefix("name"); + tcp_proxy.set_cluster("fake_cluster"); + TcpProxy::ConfigSharedPtr tcp_proxy_config(new TcpProxy::Config(tcp_proxy, factory_context)); + manager.addReadFilter( + 
std::make_shared(tcp_proxy_config, factory_context.cluster_manager_)); + + Extensions::Filters::Common::RateLimit::RequestCallbacks* request_callbacks{}; + EXPECT_CALL(*rl_client, limit(_, "foo", + testing::ContainerEq( + std::vector{{{{"hello", "world"}}}}), + testing::A(), _)) + .WillOnce(WithArgs<0>( + Invoke([&](Extensions::Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks = &callbacks; + }))); + + EXPECT_EQ(manager.initializeReadFilters(), true); + + EXPECT_CALL(factory_context.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) + .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool))); + + request_callbacks->complete(Extensions::Filters::Common::RateLimit::LimitStatus::OK, nullptr, + nullptr, nullptr, "", nullptr); + conn_pool.poolReady(upstream_connection); + + Buffer::OwnedImpl buffer("hello"); + EXPECT_CALL(upstream_connection, write(BufferEqual(&buffer), _)); + read_buffer_.add("hello"); + manager.onRead(); + + connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + } // namespace RateLimitFilter } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/rbac/filter_test.cc b/test/extensions/filters/network/rbac/filter_test.cc index 0d62c804b5c59..7d2bcf4dc3c42 100644 --- a/test/extensions/filters/network/rbac/filter_test.cc +++ b/test/extensions/filters/network/rbac/filter_test.cc @@ -52,7 +52,8 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { config.set_enforcement_type(envoy::extensions::filters::network::rbac::v3::RBAC::CONTINUOUS); } - return std::make_shared(config, store_); + return std::make_shared( + config, store_, ProtobufMessage::getStrictValidationVisitor()); } RoleBasedAccessControlNetworkFilterTest() : config_(setupConfig()) { diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 0286d6a5c5bc3..2b517317a6bd2 100644 --- 
a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -148,11 +148,12 @@ stat_prefix: foo cb(connection); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(RedisProxyFilterConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.redis_proxy"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index 404651366b7cc..3cf4cc3762411 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -518,7 +518,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithFaultInjectionIntegrationTest void RedisProxyIntegrationTest::initialize() { setUpstreamCount(num_upstreams_); - setDeterministic(); + setDeterministicValue(); config_helper_.renameListener("redis_proxy"); BaseIntegrationTest::initialize(); } diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index 6519261f14b54..ee11ffa8a5bc3 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -22,7 +22,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/upstream:basic_resource_limit_mocks", "//test/mocks/upstream:cluster_manager_mocks", - "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto", + 
"@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc index 8a6d9c62a224e..6d41a92a4298a 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -39,7 +39,7 @@ class SniDynamicProxyFilterIntegrationTest fmt::format(R"EOF( name: envoy.filters.http.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig dns_cache_config: name: foo dns_lookup_family: {} diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc index 5eda1b49de6b9..cbd3816f31668 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.h" #include "envoy/network/connection.h" #include "source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h" diff --git a/test/extensions/filters/network/tcp_proxy/config_test.cc b/test/extensions/filters/network/tcp_proxy/config_test.cc index 4fabbeadcf224..ff3457f2cf7c4 100644 --- a/test/extensions/filters/network/tcp_proxy/config_test.cc +++ b/test/extensions/filters/network/tcp_proxy/config_test.cc @@ -72,11 +72,12 @@ 
TEST(ConfigTest, ConfigTest) { cb(connection); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(ConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.tcp_proxy"; - ASSERT_NE( + ASSERT_EQ( nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index cd3fe562efe15..1642bd029c3bc 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -353,6 +353,7 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_names = ["envoy.filters.network.thrift_proxy"], + shard_count = 4, deps = [ ":integration_lib", ":utility_lib", diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 7ac6ee7bf94b8..ff9dd784f622d 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -1750,9 +1750,9 @@ payload_passthrough: true EXPECT_EQ(1U, store_.counter("test.response_reply").value()); EXPECT_EQ(0U, store_.counter("test.response_exception").value()); EXPECT_EQ(0U, store_.counter("test.response_invalid_type").value()); - // In payload_passthrough mode, Envoy cannot detect response error. 
- EXPECT_EQ(1U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_error").value()); + EXPECT_EQ(1U, store_.counter("test.response_passthrough").value()); + EXPECT_EQ(0U, store_.counter("test.response_success").value()); + EXPECT_EQ(1U, store_.counter("test.response_error").value()); } TEST_F(ThriftConnectionManagerTest, PayloadPassthroughRequestAndInvalidResponse) { diff --git a/test/extensions/filters/network/thrift_proxy/integration_test.cc b/test/extensions/filters/network/thrift_proxy/integration_test.cc index d13f9ddd2975d..179861e244968 100644 --- a/test/extensions/filters/network/thrift_proxy/integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/integration_test.cc @@ -1,5 +1,6 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "source/common/common/fmt.h" #include "source/extensions/filters/network/thrift_proxy/buffer_helper.h" #include "test/extensions/filters/network/thrift_proxy/integration.h" @@ -148,6 +149,11 @@ class ThriftConnManagerIntegrationTest // while oneway's are handled by the "poke" method. All other requests // are handled by "execute". 
FakeUpstream* getExpectedUpstream(bool oneway) { + int upstreamIdx = getExpectedUpstreamIdx(oneway); + return fake_upstreams_[upstreamIdx].get(); + } + + int getExpectedUpstreamIdx(bool oneway) { int upstreamIdx = 2; if (multiplexed_) { upstreamIdx = 0; @@ -157,7 +163,7 @@ class ThriftConnManagerIntegrationTest upstreamIdx = 1; } - return fake_upstreams_[upstreamIdx].get(); + return upstreamIdx; } TransportType transport_; @@ -225,8 +231,29 @@ TEST_P(ThriftConnManagerIntegrationTest, Success) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); + if (payload_passthrough_ && + (transport_ == TransportType::Framed || transport_ == TransportType::Header) && + protocol_ != ProtocolType::Twitter) { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(1U, counter->value()); + } else { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(0U, counter->value()); + } + counter = test_server_->counter("thrift.thrift_stats.response_reply"); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_reply", upstream_idx)); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_success", upstream_idx)); + EXPECT_EQ(1U, counter->value()); } TEST_P(ThriftConnManagerIntegrationTest, IDLException) { @@ -252,13 +279,28 @@ TEST_P(ThriftConnManagerIntegrationTest, IDLException) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); - 
counter = test_server_->counter("thrift.thrift_stats.response_error"); - if (payload_passthrough_ && transport_ == TransportType::Framed && + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + if (payload_passthrough_ && + (transport_ == TransportType::Framed || transport_ == TransportType::Header) && protocol_ != ProtocolType::Twitter) { - EXPECT_EQ(0U, counter->value()); - } else { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); EXPECT_EQ(1U, counter->value()); + } else { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(0U, counter->value()); } + counter = test_server_->counter("thrift.thrift_stats.response_reply"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("thrift.thrift_stats.response_error"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_reply", upstream_idx)); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_error", upstream_idx)); + EXPECT_EQ(1U, counter->value()); } TEST_P(ThriftConnManagerIntegrationTest, Exception) { @@ -284,8 +326,15 @@ TEST_P(ThriftConnManagerIntegrationTest, Exception) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_exception"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_exception", upstream_idx)); + EXPECT_EQ(1U, counter->value()); } 
TEST_P(ThriftConnManagerIntegrationTest, EarlyClose) { @@ -361,6 +410,10 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyUpstreamClose) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_exception"); EXPECT_EQ(1U, counter->value()); } @@ -492,10 +545,18 @@ TEST_P(ThriftTwitterConnManagerIntegrationTest, Success) { EXPECT_TRUE(TestUtility::buffersEqual( Buffer::OwnedImpl(tcp_client->data().substr(upgrade_response_size)), response_bytes_)); + // 2 requests on downstream but the first is an upgrade, so only one on upstream side Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(2U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(2U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_success", upstream_idx)); + EXPECT_EQ(1U, counter->value()); #endif } diff --git a/test/extensions/filters/network/thrift_proxy/mocks.h b/test/extensions/filters/network/thrift_proxy/mocks.h index b3eddda2cb352..a9e41e54a235d 100644 --- a/test/extensions/filters/network/thrift_proxy/mocks.h +++ b/test/extensions/filters/network/thrift_proxy/mocks.h @@ -10,6 +10,7 @@ #include "source/extensions/filters/network/thrift_proxy/protocol.h" #include "source/extensions/filters/network/thrift_proxy/router/router.h" #include "source/extensions/filters/network/thrift_proxy/router/router_ratelimit.h" +#include 
"source/extensions/filters/network/thrift_proxy/thrift.h" #include "source/extensions/filters/network/thrift_proxy/transport.h" #include "test/mocks/network/mocks.h" @@ -65,6 +66,7 @@ class MockProtocol : public Protocol { MOCK_METHOD(void, setType, (ProtocolType)); MOCK_METHOD(bool, readMessageBegin, (Buffer::Instance & buffer, MessageMetadata& metadata)); MOCK_METHOD(bool, readMessageEnd, (Buffer::Instance & buffer)); + MOCK_METHOD(bool, peekReplyPayload, (Buffer::Instance & buffer, ReplyType& reply_type)); MOCK_METHOD(bool, readStructBegin, (Buffer::Instance & buffer, std::string& name)); MOCK_METHOD(bool, readStructEnd, (Buffer::Instance & buffer)); MOCK_METHOD(bool, readFieldBegin, diff --git a/test/extensions/filters/network/thrift_proxy/requirements.txt b/test/extensions/filters/network/thrift_proxy/requirements.txt index f5868c4c7d449..4b7d0cd7ca833 100644 --- a/test/extensions/filters/network/thrift_proxy/requirements.txt +++ b/test/extensions/filters/network/thrift_proxy/requirements.txt @@ -1,5 +1,5 @@ -thrift==0.13.0 \ - --hash=sha256:9af1c86bf73433afc6010ed376a6c6aca2b54099cc0d61895f640870a9ae7d89 six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 +thrift==0.15.0 \ + --hash=sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29 diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index 3bcbb4bff7d52..22bcf7ca0910e 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -1122,7 +1122,7 @@ TEST_F(ThriftRouterTest, PoolTimeoutUpstreamTimeMeasurement) { startRequest(MessageType::Call); - 
dispatcher_.time_system_.advanceTimeWait(std::chrono::milliseconds(500)); + dispatcher_.globalTimeSystem().advanceTimeWait(std::chrono::milliseconds(500)); EXPECT_CALL(cluster_scope, histogram("thrift.upstream_rq_time", Stats::Histogram::Unit::Milliseconds)) .Times(0); @@ -1219,7 +1219,7 @@ TEST_P(ThriftRouterFieldTypeTest, CallWithUpstreamRqTime) { sendTrivialStruct(field_type); completeRequest(); - dispatcher_.time_system_.advanceTimeWait(std::chrono::milliseconds(500)); + dispatcher_.globalTimeSystem().advanceTimeWait(std::chrono::milliseconds(500)); EXPECT_CALL(cluster_scope, histogram("thrift.upstream_rq_time", Stats::Histogram::Unit::Milliseconds)); EXPECT_CALL(cluster_scope, diff --git a/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc b/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc index 5532da024e32c..9e8e80f7e93dc 100644 --- a/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc +++ b/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc @@ -147,6 +147,10 @@ class ShadowWriterTest : public testing::Test { MessageMetadataSharedPtr response_metadata = std::make_shared(); response_metadata->setMessageType(message_type); response_metadata->setSequenceId(1); + if (message_type == MessageType::Reply) { + const auto reply_type = success ? 
ReplyType::Success : ReplyType::Error; + response_metadata->setReplyType(reply_type); + } auto transport_ptr = NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); @@ -412,22 +416,14 @@ TEST_F(ShadowWriterTest, TestNullResponseDecoder) { EXPECT_TRUE(decoder_ptr->passthroughEnabled()); metadata_->setMessageType(MessageType::Reply); + metadata_->setReplyType(ReplyType::Success); EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageBegin(metadata_)); + EXPECT_TRUE(decoder_ptr->responseSuccess()); Buffer::OwnedImpl buffer; decoder_ptr->upstreamData(buffer); - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageEnd()); - // First reply field. - { - FieldType field_type; - int16_t field_id = 0; - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageBegin(metadata_)); - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->fieldBegin("", field_type, field_id)); - EXPECT_TRUE(decoder_ptr->responseSuccess()); - } - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->transportBegin(nullptr)); EXPECT_EQ(FilterStatus::Continue, decoder_ptr->transportEnd()); } diff --git a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc index 9bead20a44bae..8319b926e95cf 100644 --- a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc @@ -20,7 +20,7 @@ namespace ThriftProxy { class ThriftTranslationIntegrationTest : public testing::TestWithParam< - std::tuple>, + std::tuple>, public BaseThriftIntegrationTest { public: static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) @@ -42,13 +42,11 @@ class ThriftTranslationIntegrationTest } void initialize() override { - TransportType downstream_transport, upstream_transport; - ProtocolType downstream_protocol, upstream_protocol; - std::tie(downstream_transport, downstream_protocol, upstream_transport, 
upstream_protocol) = - GetParam(); + std::tie(downstream_transport_, downstream_protocol_, upstream_transport_, upstream_protocol_, + passthrough_) = GetParam(); - auto upstream_transport_proto = transportTypeToProto(upstream_transport); - auto upstream_protocol_proto = protocolTypeToProto(upstream_protocol); + auto upstream_transport_proto = transportTypeToProto(upstream_transport_); + auto upstream_protocol_proto = protocolTypeToProto(upstream_protocol_); envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions proto_opts; proto_opts.set_transport(upstream_transport_proto); @@ -61,27 +59,43 @@ class ThriftTranslationIntegrationTest (*opts)[NetworkFilterNames::get().ThriftProxy].PackFrom(proto_opts); }); + if (passthrough_) { + config_helper_.addFilterConfigModifier< + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy>( + "thrift", [](Protobuf::Message& filter) { + auto& conn_manager = + dynamic_cast( + filter); + conn_manager.set_payload_passthrough(true); + }); + } + // Invent some varying, but deterministic, values to add. We use the add method instead of // execute because the default execute params contains a set and the ordering can vary across // generated payloads. 
std::vector args({ - fmt::format("{}", (static_cast(downstream_transport) << 8) + - static_cast(downstream_protocol)), - fmt::format("{}", (static_cast(upstream_transport) << 8) + - static_cast(upstream_protocol)), + fmt::format("{}", (static_cast(downstream_transport_) << 8) + + static_cast(downstream_protocol_)), + fmt::format("{}", (static_cast(upstream_transport_) << 8) + + static_cast(upstream_protocol_)), }); - PayloadOptions downstream_opts(downstream_transport, downstream_protocol, DriverMode::Success, + PayloadOptions downstream_opts(downstream_transport_, downstream_protocol_, DriverMode::Success, {}, "add", args); preparePayloads(downstream_opts, downstream_request_bytes_, downstream_response_bytes_); - PayloadOptions upstream_opts(upstream_transport, upstream_protocol, DriverMode::Success, {}, + PayloadOptions upstream_opts(upstream_transport_, upstream_protocol_, DriverMode::Success, {}, "add", args); preparePayloads(upstream_opts, upstream_request_bytes_, upstream_response_bytes_); BaseThriftIntegrationTest::initialize(); } + TransportType downstream_transport_; + ProtocolType downstream_protocol_; + TransportType upstream_transport_; + ProtocolType upstream_protocol_; + bool passthrough_; Buffer::OwnedImpl downstream_request_bytes_; Buffer::OwnedImpl downstream_response_bytes_; Buffer::OwnedImpl upstream_request_bytes_; @@ -89,17 +103,22 @@ class ThriftTranslationIntegrationTest }; static std::string paramToString( - const TestParamInfo>& + const TestParamInfo>& params) { TransportType downstream_transport, upstream_transport; ProtocolType downstream_protocol, upstream_protocol; - std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol) = - params.param; - - return fmt::format("From{}{}To{}{}", transportNameForTest(downstream_transport), - protocolNameForTest(downstream_protocol), - transportNameForTest(upstream_transport), - protocolNameForTest(upstream_protocol)); + bool passthrough; + 
std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol, + passthrough) = params.param; + + auto result = + fmt::format("From{}{}To{}{}", transportNameForTest(downstream_transport), + protocolNameForTest(downstream_protocol), + transportNameForTest(upstream_transport), protocolNameForTest(upstream_protocol)); + if (passthrough) { + result = fmt::format("{}Passthrough", result); + } + return result; } INSTANTIATE_TEST_SUITE_P( @@ -107,7 +126,7 @@ INSTANTIATE_TEST_SUITE_P( Combine(Values(TransportType::Framed, TransportType::Unframed, TransportType::Header), Values(ProtocolType::Binary, ProtocolType::Compact), Values(TransportType::Framed, TransportType::Unframed, TransportType::Header), - Values(ProtocolType::Binary, ProtocolType::Compact)), + Values(ProtocolType::Binary, ProtocolType::Compact), Values(false, true)), paramToString); // Tests that the proxy will translate between different downstream and upstream transports and @@ -135,8 +154,32 @@ TEST_P(ThriftTranslationIntegrationTest, Translates) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("cluster.cluster_0.thrift.upstream_rq_call"); + EXPECT_EQ(1U, counter->value()); + if (passthrough_ && + (downstream_transport_ == TransportType::Framed || + downstream_transport_ == TransportType::Header) && + (upstream_transport_ == TransportType::Framed || + upstream_transport_ == TransportType::Header) && + downstream_protocol_ == upstream_protocol_ && downstream_protocol_ != ProtocolType::Twitter) { + counter = test_server_->counter("thrift.thrift_stats.request_passthrough"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(1U, counter->value()); + } else { + counter = test_server_->counter("thrift.thrift_stats.request_passthrough"); + EXPECT_EQ(0U, counter->value()); + counter = 
test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(0U, counter->value()); + } + counter = test_server_->counter("thrift.thrift_stats.response_reply"); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("cluster.cluster_0.thrift.upstream_resp_reply"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("cluster.cluster_0.thrift.upstream_resp_success"); + EXPECT_EQ(1U, counter->value()); } } // namespace ThriftProxy diff --git a/test/extensions/filters/network/wasm/BUILD b/test/extensions/filters/network/wasm/BUILD index 6d24f7f9fda28..840a0f7afcbd8 100644 --- a/test/extensions/filters/network/wasm/BUILD +++ b/test/extensions/filters/network/wasm/BUILD @@ -28,7 +28,6 @@ envoy_extension_cc_test( "//source/common/common:base64_lib", "//source/common/common:hex_lib", "//source/common/crypto:utility_lib", - "//source/extensions/common/crypto:utility_lib", "//source/extensions/common/wasm:wasm_lib", "//source/extensions/filters/network/wasm:config", "//test/extensions/common/wasm:wasm_runtime", diff --git a/test/extensions/filters/network/wasm/config_test.cc b/test/extensions/filters/network/wasm/config_test.cc index 66132102bd929..9673c4d598a39 100644 --- a/test/extensions/filters/network/wasm/config_test.cc +++ b/test/extensions/filters/network/wasm/config_test.cc @@ -30,7 +30,7 @@ class WasmNetworkFilterConfigTest : public testing::TestWithParam { ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_)); ON_CALL(context_, initManager()).WillByDefault(ReturnRef(init_manager_)); ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(context_, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); } void SetUp() override { 
Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); } @@ -101,6 +101,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromFileWasm) { // Check if the context still holds a valid Wasm even after the factory is destroyed. EXPECT_TRUE(context); EXPECT_TRUE(context->wasm()); + // Check if the custom stat namespace is registered during the initialization. + EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); } TEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineWasm) { diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index c8aad7a22df86..aee57c57bbe07 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -36,7 +36,7 @@ envoy_extension_cc_test( "//test/mocks/server:listener_factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", ], ) @@ -62,7 +62,7 @@ envoy_extension_cc_test( "//source/extensions/filters/udp/dns_filter:config", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", "//test/integration:integration_lib", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc index ddf42cf751850..ea3bbf2c8b009 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -83,7 +83,7 @@ name: listener_0 listener_filters: name: "envoy.filters.udp.dns_filter" typed_config: - '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + '@type': 
'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig' stat_prefix: "my_prefix" client_config: resolver_timeout: 1s @@ -151,7 +151,7 @@ name: listener_1 listener_filters: name: "envoy.filters.udp.dns_filter" typed_config: - '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig' stat_prefix: "external_resolver" server_config: inline_dns_table: @@ -171,7 +171,7 @@ name: listener_1 void setup(uint32_t upstream_count) { setUdpFakeUpstream(FakeUpstreamConfig::UdpConfig()); if (upstream_count > 1) { - setDeterministic(); + setDeterministicValue(); setUpstreamCount(upstream_count); config_helper_.addConfigModifier( [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index db9d906fff1fd..6aeb70b9edc88 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -1,5 +1,5 @@ -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.validate.h" #include "source/common/common/logger.h" #include "source/extensions/filters/udp/dns_filter/dns_filter_constants.h" @@ -68,7 +68,7 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime } void setup(const std::string& yaml) { - envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig config; + envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig config; TestUtility::loadFromYamlAndValidate(yaml, config); auto store = stats_store_.createScope("dns_scope"); 
ON_CALL(listener_factory_, scope()).WillByDefault(ReturnRef(*store)); diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc index 0416435dce46f..e07d67113db8c 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc @@ -1,5 +1,5 @@ -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.validate.h" #include "source/common/network/address_impl.h" #include "source/extensions/filters/udp/dns_filter/dns_filter_utils.h" diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc index a2a3231e37c0b..f2c4dd185a06d 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc @@ -20,7 +20,7 @@ class UdpProxyIntegrationTest : public testing::TestWithParam 1) { - setDeterministic(); + setDeterministicValue(); setUpstreamCount(upstream_count); config_helper_.addConfigModifier( [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { diff --git a/test/extensions/internal_redirect/BUILD b/test/extensions/internal_redirect/BUILD new file mode 100644 index 0000000000000..61bfc13947665 --- /dev/null +++ b/test/extensions/internal_redirect/BUILD @@ -0,0 +1,29 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "redirect_extension_integration_test", + srcs = [ + "redirect_extension_integration_test.cc", + ], + deps = [ + "//source/common/http:header_map_lib", 
+ "//source/extensions/internal_redirect/allow_listed_routes:config", + "//source/extensions/internal_redirect/previous_routes:config", + "//source/extensions/internal_redirect/safe_cross_scheme:config", + "//test/integration:http_protocol_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/internal_redirect/redirect_extension_integration_test.cc b/test/extensions/internal_redirect/redirect_extension_integration_test.cc new file mode 100644 index 0000000000000..3666eb7104ebb --- /dev/null +++ b/test/extensions/internal_redirect/redirect_extension_integration_test.cc @@ -0,0 +1,299 @@ +#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" + +#include "test/integration/http_protocol_integration.h" + +namespace Envoy { + +using ::testing::HasSubstr; + +namespace { +constexpr char kTestHeaderKey[] = "test-header"; +} // namespace + +class RedirectExtensionIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { + setMaxRequestHeadersKb(60); + setMaxRequestHeadersCount(100); + envoy::config::route::v3::RetryPolicy retry_policy; + + auto pass_through = 
config_helper_.createVirtualHost("pass.through.internal.redirect"); + config_helper_.addVirtualHost(pass_through); + + auto handle = config_helper_.createVirtualHost("handle.internal.redirect"); + handle.mutable_routes(0)->set_name("redirect"); + handle.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); + config_helper_.addVirtualHost(handle); + + auto handle_max_3_hop = + config_helper_.createVirtualHost("handle.internal.redirect.max.three.hop"); + handle_max_3_hop.mutable_routes(0)->set_name("max_three_hop"); + handle_max_3_hop.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); + handle_max_3_hop.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy() + ->mutable_max_internal_redirects() + ->set_value(3); + config_helper_.addVirtualHost(handle_max_3_hop); + + HttpProtocolIntegrationTest::initialize(); + } + +protected: + // Returns the next stream that the fake upstream receives. + FakeStreamPtr waitForNextStream() { + FakeStreamPtr new_stream = nullptr; + auto wait_new_stream_fn = [this, + &new_stream](FakeHttpConnectionPtr& connection) -> AssertionResult { + AssertionResult result = + connection->waitForNewStream(*dispatcher_, new_stream, std::chrono::milliseconds(50)); + if (result) { + ASSERT(new_stream); + } + return result; + }; + + // Using a while loop to poll for new connections and new streams on all + // connections because connection reuse may or may not be triggered. 
+ while (new_stream == nullptr) { + FakeHttpConnectionPtr new_connection = nullptr; + AssertionResult result = fake_upstreams_[0]->waitForHttpConnection( + *dispatcher_, new_connection, std::chrono::milliseconds(50)); + if (result) { + ASSERT(new_connection); + upstream_connections_.push_back(std::move(new_connection)); + } + + for (auto& connection : upstream_connections_) { + result = wait_new_stream_fn(connection); + if (result) { + break; + } + } + } + + AssertionResult result = new_stream->waitForEndStream(*dispatcher_); + ASSERT(result); + return new_stream; + } + + Http::TestResponseHeaderMapImpl redirect_response_{{":status", "302"}, + {"content-length", "0"}, + {"location", "http://authority2/new/url"}, + // Test header added to confirm that response + // headers are populated for internal redirects + {kTestHeaderKey, "test-header-value"}}; + Envoy::Http::LowerCaseString test_header_key_{kTestHeaderKey}; + std::vector upstream_connections_; +}; + +TEST_P(RedirectExtensionIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) { + useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); + auto handle_prevent_repeated_target = + config_helper_.createVirtualHost("handle.internal.redirect.no.repeated.target"); + auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig + previous_routes_config; + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("previous_routes"); + predicate->mutable_typed_config()->PackFrom(previous_routes_config); + config_helper_.addVirtualHost(handle_prevent_repeated_target); + + // Validate that header sanitization is only called once. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.no.repeated.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation("http://handle.internal.redirect.no.repeated.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the same route as the first redirect. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_, 0), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 1), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 2), + HasSubstr("302 via_upstream test-header-value\n")); + EXPECT_EQ("test-header-value", + response->headers().get(test_header_key_)[0]->value().getStringView()); +} + +TEST_P(RedirectExtensionIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) { + useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); + auto handle_allow_listed_redirect_route = + config_helper_.createVirtualHost("handle.internal.redirect.only.allow.listed.target"); + auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates(); + allow_listed_routes_predicate->set_name("allow_listed_routes"); + envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig + allow_listed_routes_config; + *allow_listed_routes_config.add_allowed_route_names() = "max_three_hop"; + 
allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config); + + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_allow_listed_redirect_route); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.only.allow.listed.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.listed.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the non-allow-listed route. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://handle.internal.redirect/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_, 0), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 1), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 2), + HasSubstr("302 via_upstream test-header-value\n")); + EXPECT_EQ("test-header-value", + response->headers().get(test_header_key_)[0]->value().getStringView()); +} + +TEST_P(RedirectExtensionIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) { + useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); + auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + internal_redirect_policy->set_allow_cross_scheme_redirect(true); + + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("safe_cross_scheme_predicate"); + envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig + predicate_config; + predicate->mutable_typed_config()->PackFrom(predicate_config); + + 
internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_safe_cross_scheme_route); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to https target. This should fail. 
+ redirect_response_.setLocation("https://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("https://handle.internal.redirect/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_, 0), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 1), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 2), + HasSubstr("302 via_upstream test-header-value\n")); + EXPECT_EQ("test-header-value", + response->headers().get(test_header_key_)[0]->value().getStringView()); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, RedirectExtensionIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace Envoy diff --git a/test/extensions/key_value/file_based/BUILD b/test/extensions/key_value/file_based/BUILD index d55e4a2866e35..e1746a031907a 100644 --- a/test/extensions/key_value/file_based/BUILD +++ b/test/extensions/key_value/file_based/BUILD @@ -8,6 +8,24 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_test( + name = "alternate_protocols_cache_impl_test", + srcs = ["alternate_protocols_cache_impl_test.cc"], + deps = [ + "//source/common/common:key_value_store_lib", + "//source/common/http:alternate_protocols_cache", + "//source/common/singleton:manager_impl_lib", + 
"//source/extensions/key_value/file_based:config_lib", + "//test/common/http:common_lib", + "//test/mocks:common_lib", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:simulated_time_system_lib", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "key_value_store_test", srcs = ["key_value_store_test.cc"], diff --git a/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc b/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc new file mode 100644 index 0000000000000..113a3c713b10c --- /dev/null +++ b/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc @@ -0,0 +1,50 @@ +#include "envoy/config/common/key_value/v3/config.pb.validate.h" +#include "envoy/extensions/key_value/file_based/v3/config.pb.h" + +#include "source/common/http/alternate_protocols_cache_manager_impl.h" +#include "source/common/singleton/manager_impl.h" + +#include "test/mocks/server/factory_context.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/simulated_time_system.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { +class AlternateProtocolsCacheManagerTest : public testing::Test, + public Event::TestUsingSimulatedTime { +public: + AlternateProtocolsCacheManagerTest() { + options_.set_name("name"); + options_.mutable_max_entries()->set_value(10); + } + void initialize() { + Http::AlternateProtocolsData data = {context_}; + factory_ = std::make_unique(singleton_manager_, + tls_, data); + manager_ = factory_->get(); + } + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock context_; + testing::NiceMock tls_; + std::unique_ptr factory_; + Http::AlternateProtocolsCacheManagerSharedPtr manager_; + envoy::config::core::v3::AlternateProtocolsCacheOptions options_; + 
testing::NiceMock dispatcher_; +}; + +TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) { + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig config; + config.set_filename("foo"); + envoy::config::common::key_value::v3::KeyValueStoreConfig kv_config; + kv_config.mutable_config()->set_name("envoy.key_value.file_based"); + kv_config.mutable_config()->mutable_typed_config()->PackFrom(config); + options_.mutable_key_value_store_config()->set_name("envoy.common.key_value"); + options_.mutable_key_value_store_config()->mutable_typed_config()->PackFrom(kv_config); + initialize(); + manager_->getCache(options_, dispatcher_); +} + +} // namespace +} // namespace Envoy diff --git a/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc b/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc index 400f97a4d11b2..5d05210ad4f11 100644 --- a/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc +++ b/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc @@ -26,7 +26,7 @@ class TestableInjectedResourceMonitor : public InjectedResourceMonitor { const envoy::extensions::resource_monitors::injected_resource::v3::InjectedResourceConfig& config, Server::Configuration::ResourceMonitorFactoryContext& context) - : InjectedResourceMonitor(config, context), dispatcher_(context.dispatcher()) {} + : InjectedResourceMonitor(config, context), dispatcher_(context.mainThreadDispatcher()) {} protected: void onFileChanged() override { diff --git a/test/extensions/retry/host/previous_hosts/integration_test.cc b/test/extensions/retry/host/previous_hosts/integration_test.cc index a433ad4e224b6..a05dfe82e3a7c 100644 --- a/test/extensions/retry/host/previous_hosts/integration_test.cc +++ b/test/extensions/retry/host/previous_hosts/integration_test.cc @@ -17,7 +17,7 @@ class PrevioustHostsIntegrationTest : public 
testing::Test, public HttpIntegrati : HttpIntegrationTest(Http::CodecType::HTTP2, Network::Address::IpVersion::v4) {} void initialize() override { - setDeterministic(); + setDeterministicValue(); // Add the retry configuration to a new virtual host. const auto vhost_config = R"EOF( diff --git a/test/extensions/stats_sinks/dog_statsd/config_test.cc b/test/extensions/stats_sinks/dog_statsd/config_test.cc index 98b02ba931a9a..86721e448853b 100644 --- a/test/extensions/stats_sinks/dog_statsd/config_test.cc +++ b/test/extensions/stats_sinks/dog_statsd/config_test.cc @@ -142,10 +142,11 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { EXPECT_EQ(udp_sink->getPrefix(), customPrefix); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(DogStatsdConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( - DogStatsdName)); + ASSERT_EQ(nullptr, Registry::FactoryRegistry::getFactory( + "envoy.dog_statsd")); } } // namespace diff --git a/test/extensions/stats_sinks/metrics_service/config_test.cc b/test/extensions/stats_sinks/metrics_service/config_test.cc index 739e49ba5ae2f..313ac51b60c73 100644 --- a/test/extensions/stats_sinks/metrics_service/config_test.cc +++ b/test/extensions/stats_sinks/metrics_service/config_test.cc @@ -12,10 +12,11 @@ namespace StatSinks { namespace MetricsService { namespace { -// Test that the deprecated extension name still functions. +// Test that the extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(MetricsServiceConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( - MetricsServiceName)); + ASSERT_EQ(nullptr, Registry::FactoryRegistry::getFactory( + "envoy.metrics_service")); } } // namespace diff --git a/test/extensions/stats_sinks/statsd/config_test.cc b/test/extensions/stats_sinks/statsd/config_test.cc index af575796eb848..97d7dd87f821e 100644 --- a/test/extensions/stats_sinks/statsd/config_test.cc +++ b/test/extensions/stats_sinks/statsd/config_test.cc @@ -42,11 +42,12 @@ TEST(StatsConfigTest, ValidTcpStatsd) { EXPECT_NE(dynamic_cast(sink.get()), nullptr); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(StatsConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.statsd"; - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( + ASSERT_EQ(nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); } diff --git a/test/extensions/stats_sinks/wasm/config_test.cc b/test/extensions/stats_sinks/wasm/config_test.cc index 64a7d4333832d..342b25218a880 100644 --- a/test/extensions/stats_sinks/wasm/config_test.cc +++ b/test/extensions/stats_sinks/wasm/config_test.cc @@ -90,6 +90,9 @@ TEST_P(WasmStatSinkConfigTest, CreateWasmFromWASM) { initializeWithConfig(config_); EXPECT_NE(sink_, nullptr); + // Check if the custom stat namespace is registered during the initialization. 
+ EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); + NiceMock snapshot; sink_->flush(snapshot); NiceMock histogram; diff --git a/test/extensions/tracers/dynamic_ot/config_test.cc b/test/extensions/tracers/dynamic_ot/config_test.cc index 52b3dd02aa211..06c81d180b05e 100644 --- a/test/extensions/tracers/dynamic_ot/config_test.cc +++ b/test/extensions/tracers/dynamic_ot/config_test.cc @@ -53,11 +53,12 @@ TEST(DynamicOtTracerConfigTest, DynamicOpentracingHttpTracer) { EXPECT_NE(nullptr, tracer); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(DynamicOtTracerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.dynamic.ot"; - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( + ASSERT_EQ(nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); } diff --git a/test/extensions/tracers/lightstep/config_test.cc b/test/extensions/tracers/lightstep/config_test.cc index 2e290d88edde1..d25e23e07de65 100644 --- a/test/extensions/tracers/lightstep/config_test.cc +++ b/test/extensions/tracers/lightstep/config_test.cc @@ -70,11 +70,12 @@ TEST(LightstepTracerConfigTest, LightstepHttpTracerAccessToken) { EXPECT_NE(nullptr, lightstep_tracer); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. 
+// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(LightstepTracerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.lightstep"; - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( + ASSERT_EQ(nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); } diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 8f81c9138b478..71541239aa496 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -36,13 +36,13 @@ struct MockDaemonBroker : DaemonBroker { }; struct TraceProperties { - TraceProperties(const std::string span_name, const std::string origin_name, - const std::string aws_key_value, const std::string operation_name, - const std::string http_method, const std::string http_url, - const std::string user_agent) + TraceProperties(const std::string& span_name, const std::string& origin_name, + const std::string& aws_key_value, const std::string& operation_name, + const std::string& http_method, const std::string& http_url, + const std::string& user_agent, const std::string& direction) : span_name(span_name), origin_name(origin_name), aws_key_value(aws_key_value), operation_name(operation_name), http_method(http_method), http_url(http_url), - user_agent(user_agent) {} + user_agent(user_agent), direction(direction) {} const std::string span_name; const std::string origin_name; const std::string aws_key_value; @@ -50,17 +50,19 @@ struct TraceProperties { const std::string http_method; const std::string http_url; const std::string user_agent; + const std::string direction; }; class XRayTracerTest : public ::testing::Test { public: XRayTracerTest() : broker_(std::make_unique("127.0.0.1:2000")), - expected_(std::make_unique("Service 1", "AWS::Service::Proxy", - "test_value", "Create", "POST", "/first/second", - "Mozilla/5.0 (Macintosh; 
Intel Mac OS X)")) {} + expected_(std::make_unique( + "Service 1", "AWS::Service::Proxy", "test_value", "egress hostname", "POST", + "/first/second", "Mozilla/5.0 (Macintosh; Intel Mac OS X)", "egress")) {} absl::flat_hash_map aws_metadata_; NiceMock server_; + NiceMock config_; std::unique_ptr broker_; std::unique_ptr expected_; void commonAsserts(daemon::Segment& s); @@ -75,14 +77,15 @@ void XRayTracerTest::commonAsserts(daemon::Segment& s) { EXPECT_EQ(expected_->http_url, s.http().request().fields().at("url").string_value().c_str()); EXPECT_EQ(expected_->user_agent, s.http().request().fields().at(Tracing::Tags::get().UserAgent).string_value().c_str()); + EXPECT_EQ(expected_->direction, s.annotations().at("direction").c_str()); } TEST_F(XRayTracerTest, SerializeSpanTest) { constexpr uint32_t expected_status_code = 202; constexpr uint32_t expected_content_length = 1337; - constexpr auto expected_client_ip = "10.0.0.100"; - constexpr auto expected_x_forwarded_for = false; - constexpr auto expected_upstream_address = "10.0.0.200"; + constexpr absl::string_view expected_client_ip = "10.0.0.100"; + constexpr bool expected_x_forwarded_for = false; + constexpr absl::string_view expected_upstream_address = "10.0.0.200"; auto on_send = [&](const std::string& json) { ASSERT_FALSE(json.empty()); @@ -92,7 +95,7 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { commonAsserts(s); EXPECT_FALSE(s.trace_id().empty()); EXPECT_FALSE(s.id().empty()); - EXPECT_EQ(1, s.annotations().size()); + EXPECT_EQ(2, s.annotations().size()); EXPECT_TRUE(s.parent_id().empty()); EXPECT_FALSE(s.fault()); /*server error*/ EXPECT_FALSE(s.error()); /*client error*/ @@ -101,20 +104,21 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); EXPECT_EQ(expected_content_length, s.http().response().fields().at("content_length").number_value()); - EXPECT_STREQ(expected_client_ip, - 
s.http().request().fields().at("client_ip").string_value().c_str()); + EXPECT_EQ(expected_client_ip, + s.http().request().fields().at("client_ip").string_value().c_str()); EXPECT_EQ(expected_x_forwarded_for, s.http().request().fields().at("x_forwarded_for").bool_value()); - EXPECT_STREQ(expected_upstream_address, - s.annotations().at(Tracing::Tags::get().UpstreamAddress).c_str()); + EXPECT_EQ(expected_upstream_address, + s.annotations().at(Tracing::Tags::get().UpstreamAddress).c_str()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -126,7 +130,7 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { } TEST_F(XRayTracerTest, SerializeSpanTestServerError) { - constexpr auto expected_error = "true"; + constexpr absl::string_view expected_error = "true"; constexpr uint32_t expected_status_code = 503; auto on_send = [&](const std::string& json) { @@ -144,12 +148,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestServerError) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", 
ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -177,12 +182,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestClientError) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -209,12 +215,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestClientErrorWithThrottle) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); 
aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -235,12 +242,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithEmptyValue) { EXPECT_FALSE(s.http().request().fields().contains(Tracing::Tags::get().Status)); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -249,8 +257,9 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithEmptyValue) { } TEST_F(XRayTracerTest, SerializeSpanTestWithStatusCodeNotANumber) { - constexpr auto expected_status_code = "ok"; // status code which is not a number - constexpr auto expected_content_length = "huge"; // response length 
which is not a number + constexpr absl::string_view expected_status_code = "ok"; // status code which is not a number + constexpr absl::string_view expected_content_length = + "huge"; // response length which is not a number auto on_send = [&](const std::string& json) { ASSERT_FALSE(json.empty()); @@ -265,12 +274,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithStatusCodeNotANumber) { EXPECT_FALSE(s.http().request().fields().contains("content_length")); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -318,15 +328,15 @@ TEST_F(XRayTracerTest, GetTraceId) { } TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { - NiceMock config; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); const auto& broker = *broker_; Tracer tracer{expected_->span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; // Span id taken from random generator EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(999)); - auto parent_span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto parent_span = tracer.startSpan(config_, expected_->operation_name, + 
server_.timeSource().systemTime(), absl::nullopt /*headers*/); const XRay::Span* xray_parent_span = static_cast(parent_span.get()); auto on_send = [&](const std::string& json) { @@ -345,17 +355,17 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { // Span id taken from random generator EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(262626262626)); - auto child = - parent_span->spawnChild(config, expected_->operation_name, server_.timeSource().systemTime()); + auto child = parent_span->spawnChild(config_, expected_->operation_name, + server_.timeSource().systemTime()); child->finishSpan(); } TEST_F(XRayTracerTest, UseExistingHeaderInformation) { + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); XRayHeader xray_header; xray_header.trace_id_ = "a"; xray_header.parent_id_ = "b"; - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -363,7 +373,7 @@ TEST_F(XRayTracerTest, UseExistingHeaderInformation) { std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); + auto span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), xray_header); const XRay::Span* xray_span = static_cast(span.get()); EXPECT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); @@ -371,13 +381,13 @@ TEST_F(XRayTracerTest, UseExistingHeaderInformation) { } TEST_F(XRayTracerTest, DontStartSpanOnNonSampledSpans) { + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); XRayHeader xray_header; xray_header.trace_id_ = "a"; xray_header.parent_id_ = "b"; xray_header.sample_decision_ = SamplingDecision::NotSampled; // not sampled means we should panic on calling startSpan - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my 
operation"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -386,18 +396,18 @@ TEST_F(XRayTracerTest, DontStartSpanOnNonSampledSpans) { server_.timeSource(), server_.api().randomGenerator()}; Tracing::SpanPtr span; - ASSERT_DEATH(span = - tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header), - "panic: not reached"); + ASSERT_DEATH( + span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), xray_header), + "panic: not reached"); } TEST_F(XRayTracerTest, UnknownSpanStillSampled) { + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); XRayHeader xray_header; xray_header.trace_id_ = "a"; xray_header.parent_id_ = "b"; xray_header.sample_decision_ = SamplingDecision::Unknown; - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -405,7 +415,7 @@ TEST_F(XRayTracerTest, UnknownSpanStillSampled) { std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); + auto span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), xray_header); const XRay::Span* xray_span = static_cast(span.get()); EXPECT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); @@ -416,8 +426,8 @@ TEST_F(XRayTracerTest, UnknownSpanStillSampled) { } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -425,7 +435,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - 
auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), + auto span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), absl::nullopt /*headers*/); Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); @@ -437,7 +447,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { - constexpr auto span_name = "my span"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", aws_metadata_, @@ -455,7 +465,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { } TEST_F(XRayTracerTest, TraceIDFormatTest) { - constexpr auto span_name = "my span"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", aws_metadata_, @@ -479,6 +489,8 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, XRayDaemonTest, TestUtility::ipTestParamsToString); TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { + NiceMock config_; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); absl::flat_hash_map aws_metadata; NiceMock server; Network::Test::UdpSyncPeer xray_fake_daemon(GetParam()); @@ -486,8 +498,8 @@ TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { Tracer tracer{"my_segment", "origin", aws_metadata, std::make_unique(daemon_endpoint), server.timeSource(), server.api().randomGenerator()}; - auto span = tracer.startSpan("ingress" /*operation name*/, server.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, "ingress" /*operation name*/, + server.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpStatusCode, "202"); span->finishSpan(); diff --git a/test/extensions/tracers/xray/xray_tracer_impl_test.cc b/test/extensions/tracers/xray/xray_tracer_impl_test.cc index ba64f355aa97b..c28faf5f84580 100644 --- a/test/extensions/tracers/xray/xray_tracer_impl_test.cc +++ 
b/test/extensions/tracers/xray/xray_tracer_impl_test.cc @@ -61,7 +61,7 @@ TEST_F(XRayDriverTest, XRayTraceHeaderSampled) { } TEST_F(XRayDriverTest, XRayTraceHeaderSamplingUnknown) { - request_headers_.addCopy(XRayTraceHeader, "Root=1-272793;Parent=5398ad8"); + request_headers_.addCopy(XRayTraceHeader, "Root=1-272793;Parent=5398ad8;Sampled="); XRayConfiguration config{"" /*daemon_endpoint*/, "test_segment_name", "" /*sampling_rules*/, "" /*origin*/, aws_metadata_}; diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index 6c95bf964b4e4..1d7f222d5f5c2 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -67,11 +67,12 @@ TEST(ZipkinTracerConfigTest, ZipkinHttpTracerWithTypedConfig) { EXPECT_NE(nullptr, zipkin_tracer); } -// Test that the deprecated extension name still functions. +// Test that the deprecated extension name is disabled by default. +// TODO(zuercher): remove when envoy.deprecated_features.allow_deprecated_extension_names is removed TEST(ZipkinTracerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.zipkin"; - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( + ASSERT_EQ(nullptr, Registry::FactoryRegistry::getFactory( deprecated_name)); } diff --git a/test/extensions/tracers/zipkin/span_buffer_test.cc b/test/extensions/tracers/zipkin/span_buffer_test.cc index 92c8a7960b864..ed1d717edeeff 100644 --- a/test/extensions/tracers/zipkin/span_buffer_test.cc +++ b/test/extensions/tracers/zipkin/span_buffer_test.cc @@ -464,7 +464,7 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestampInTheFuture) { TEST(ZipkinSpanBufferTest, TestDeprecationOfHttpJsonV1) { EXPECT_THROW_WITH_MESSAGE( SpanBuffer buffer1( - envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, false), + envoy::config::trace::v3::ZipkinConfig::DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE, false), 
Envoy::EnvoyException, "hidden_envoy_deprecated_HTTP_JSON_V1 has been deprecated. Please use a non-default " "envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion value."); diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 5e2de45ee3819..0967e77d25b05 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -813,7 +813,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { NiceMock cluster_manager; NiceMock init_manager; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); // EXPECT_CALL(factory_context_, random()).WillOnce(ReturnRef(random)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, clusterManager()).WillOnce(ReturnRef(cluster_manager)); @@ -1219,7 +1219,7 @@ TEST_F(ClientContextConfigImplTest, SecretNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); sds_secret_configs->set_name("abc.com"); @@ -1251,7 +1251,7 @@ TEST_F(ClientContextConfigImplTest, ValidationContextNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); 
EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); sds_secret_configs->set_name("abc.com"); @@ -1557,7 +1557,7 @@ TEST_F(ServerContextConfigImplTest, SecretNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); sds_secret_configs->set_name("abc.com"); @@ -1589,7 +1589,7 @@ TEST_F(ServerContextConfigImplTest, ValidationContextNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); sds_secret_configs->set_name("abc.com"); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index db8cd6b6cec52..c85ae51bfac66 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ 
b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -658,6 +658,7 @@ void testUtilV2(const TestUtilOptionsV2& options) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, server_names); + EXPECT_FALSE(server_ssl_socket_factory.usesProxyProtocolOptions()); Event::DispatcherPtr dispatcher(server_api->allocateDispatcher("test_thread")); auto socket = std::make_shared( @@ -702,9 +703,11 @@ void testUtilV2(const TestUtilOptionsV2& options) { ? options.transportSocketOptions()->serverNameOverride().value() : options.clientCtxProto().sni(); socket->setRequestedServerName(sni); + Network::TransportSocketPtr transport_socket = + server_ssl_socket_factory.createTransportSocket(nullptr); + EXPECT_FALSE(transport_socket->startSecureTransport()); server_connection = dispatcher->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), - stream_info); + std::move(socket), std::move(transport_socket), stream_info); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -4720,7 +4723,7 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { testing::NiceMock factory_context; NiceMock init_manager; NiceMock dispatcher; - EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); @@ -4760,7 +4763,7 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context, 
dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; auto sds_secret_configs = diff --git a/test/extensions/transport_sockets/tls/utility_test.cc b/test/extensions/transport_sockets/tls/utility_test.cc index 531599c01be59..41ac74b128dc2 100644 --- a/test/extensions/transport_sockets/tls/utility_test.cc +++ b/test/extensions/transport_sockets/tls/utility_test.cc @@ -1,6 +1,7 @@ #include #include +#include "source/common/common/c_smart_ptr.h" #include "source/extensions/transport_sockets/tls/utility.h" #include "test/extensions/transport_sockets/tls/ssl_test_utility.h" @@ -21,6 +22,9 @@ namespace TransportSockets { namespace Tls { namespace { +using X509StoreContextPtr = CSmartPtr; +using X509StorePtr = CSmartPtr; + TEST(UtilityTest, TestGetSubjectAlternateNamesWithDNS) { bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); @@ -164,6 +168,18 @@ TEST(UtilityTest, SslErrorDescriptionTest) { "Unknown BoringSSL error had occurred"); } +TEST(UtilityTest, TestGetX509ErrorInfo) { + auto cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); + X509StoreContextPtr store_ctx = X509_STORE_CTX_new(); + X509StorePtr ssl_ctx = X509_STORE_new(); + EXPECT_TRUE(X509_STORE_CTX_init(store_ctx.get(), ssl_ctx.get(), cert.get(), nullptr)); + X509_STORE_CTX_set_error(store_ctx.get(), X509_V_ERR_UNSPECIFIED); + EXPECT_EQ(Utility::getX509VerificationErrorInfo(store_ctx.get()), + "X509_verify_cert: certificate verification error at depth 0: unknown certificate " + "verification error"); +} + } // namespace } // namespace Tls } // namespace TransportSockets diff --git a/test/extensions/upstreams/http/config_test.cc 
b/test/extensions/upstreams/http/config_test.cc index cd91d8c55f675..13f16e02be5a0 100644 --- a/test/extensions/upstreams/http/config_test.cc +++ b/test/extensions/upstreams/http/config_test.cc @@ -1,6 +1,7 @@ #include "source/extensions/upstreams/http/config.h" #include "test/mocks/protobuf/mocks.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -45,6 +46,33 @@ TEST(FactoryTest, EmptyProto) { EXPECT_TRUE(factory.createEmptyConfigProto() != nullptr); } +TEST_F(ConfigTest, Auto) { + options_.mutable_auto_config(); + ProtocolOptionsConfigImpl config(options_, validation_visitor_); + EXPECT_FALSE(config.use_downstream_protocol_); + EXPECT_TRUE(config.use_http2_); + EXPECT_FALSE(config.use_http3_); + EXPECT_TRUE(config.use_alpn_); +} + +TEST_F(ConfigTest, AutoHttp3) { + options_.mutable_auto_config(); + options_.mutable_auto_config()->mutable_http3_protocol_options(); + options_.mutable_auto_config()->mutable_alternate_protocols_cache_options(); + ProtocolOptionsConfigImpl config(options_, validation_visitor_); + EXPECT_TRUE(config.use_http2_); + EXPECT_TRUE(config.use_http3_); + EXPECT_TRUE(config.use_alpn_); +} + +TEST_F(ConfigTest, AutoHttp3NoCache) { + options_.mutable_auto_config(); + options_.mutable_auto_config()->mutable_http3_protocol_options(); + EXPECT_THROW_WITH_MESSAGE( + ProtocolOptionsConfigImpl config(options_, validation_visitor_), EnvoyException, + "alternate protocols cache must be configured when HTTP/3 is enabled with auto_config"); +} + } // namespace Http } // namespace Upstreams } // namespace Extensions diff --git a/test/extensions/upstreams/tcp/generic/config_test.cc b/test/extensions/upstreams/tcp/generic/config_test.cc index 2e8a638356db5..dd222bedcc0cc 100644 --- a/test/extensions/upstreams/tcp/generic/config_test.cc +++ b/test/extensions/upstreams/tcp/generic/config_test.cc @@ -7,6 +7,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AnyNumber; using testing::NiceMock; using 
testing::Return; @@ -31,6 +32,30 @@ TEST_F(TcpConnPoolTest, TestNoConnPool) { factory_.createGenericConnPool(thread_local_cluster_, config, nullptr, callbacks_)); } +TEST_F(TcpConnPoolTest, Http2Config) { + auto info = std::make_shared(); + EXPECT_CALL(*info, features()).WillOnce(Return(Upstream::ClusterInfo::Features::HTTP2)); + EXPECT_CALL(thread_local_cluster_, info).WillOnce(Return(info)); + envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy_TunnelingConfig config; + config.set_hostname("host"); + EXPECT_CALL(thread_local_cluster_, httpConnPool(_, _, _)).WillOnce(Return(absl::nullopt)); + EXPECT_EQ(nullptr, + factory_.createGenericConnPool(thread_local_cluster_, config, nullptr, callbacks_)); +} + +TEST_F(TcpConnPoolTest, Http3Config) { + auto info = std::make_shared(); + EXPECT_CALL(*info, features()) + .Times(AnyNumber()) + .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP3)); + EXPECT_CALL(thread_local_cluster_, info).Times(AnyNumber()).WillRepeatedly(Return(info)); + envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy_TunnelingConfig config; + config.set_hostname("host"); + EXPECT_CALL(thread_local_cluster_, httpConnPool(_, _, _)).WillOnce(Return(absl::nullopt)); + EXPECT_EQ(nullptr, + factory_.createGenericConnPool(thread_local_cluster_, config, nullptr, callbacks_)); +} + } // namespace Generic } // namespace Tcp } // namespace Upstreams diff --git a/test/extensions/watchdog/profile_action/BUILD b/test/extensions/watchdog/profile_action/BUILD index 9a36ed413146c..18b1f5f938ed0 100644 --- a/test/extensions/watchdog/profile_action/BUILD +++ b/test/extensions/watchdog/profile_action/BUILD @@ -32,7 +32,7 @@ envoy_extension_cc_test( "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) @@ -48,6 
+48,6 @@ envoy_extension_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/watchdog/profile_action/config_test.cc b/test/extensions/watchdog/profile_action/config_test.cc index 157128a62af99..22dca56ff3527 100644 --- a/test/extensions/watchdog/profile_action/config_test.cc +++ b/test/extensions/watchdog/profile_action/config_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/registry/registry.h" #include "envoy/server/guarddog_config.h" @@ -31,7 +31,7 @@ TEST(ProfileActionFactoryTest, CanCreateAction) { "name": "envoy.watchdog.profile_action", "typed_config": { "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", - "type_url": "type.googleapis.com/envoy.extensions.watchdog.profile_action.v3alpha.ProfileActionConfig", + "type_url": "type.googleapis.com/envoy.extensions.watchdog.profile_action.v3.ProfileActionConfig", "value": { "profile_duration": "2s", "profile_path": "/tmp/envoy/", diff --git a/test/extensions/watchdog/profile_action/profile_action_test.cc b/test/extensions/watchdog/profile_action/profile_action_test.cc index d35bea645dc18..463958aab4a71 100644 --- a/test/extensions/watchdog/profile_action/profile_action_test.cc +++ b/test/extensions/watchdog/profile_action/profile_action_test.cc @@ -3,7 +3,7 @@ #include "envoy/common/time.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/dispatcher.h" -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/filesystem/filesystem.h" #include 
"envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" @@ -92,7 +92,7 @@ class ProfileActionTest : public testing::Test { TEST_F(ProfileActionTest, CanDoSingleProfile) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); @@ -132,7 +132,7 @@ TEST_F(ProfileActionTest, CanDoSingleProfile) { TEST_F(ProfileActionTest, CanDoMultipleProfiles) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); // Create the ProfileAction before we start running the dispatcher @@ -188,7 +188,7 @@ TEST_F(ProfileActionTest, CanDoMultipleProfiles) { TEST_F(ProfileActionTest, CannotTriggerConcurrentProfiles) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; TestUtility::loadFromJson(absl::Substitute(R"EOF({ "profile_path": "$0", })EOF", test_path_), config); // Create the ProfileAction before we start running the dispatcher @@ -229,7 +229,7 @@ TEST_F(ProfileActionTest, CannotTriggerConcurrentProfiles) { TEST_F(ProfileActionTest, ShouldNotProfileIfDirectoryDoesNotExist) { // Create configuration. 
- envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; const std::string nonexistant_path = test_path_ + "/nonexistant_dir/"; TestUtility::loadFromJson( absl::Substitute(R"EOF({ "profile_path": "$0", })EOF", nonexistant_path), config); @@ -264,7 +264,7 @@ TEST_F(ProfileActionTest, ShouldNotProfileIfDirectoryDoesNotExist) { TEST_F(ProfileActionTest, ShouldNotProfileIfNoTids) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; TestUtility::loadFromJson(absl::Substitute(R"EOF({ "profile_path": "$0"})EOF", test_path_), config); // Create the ProfileAction before we start running the dispatcher @@ -296,7 +296,7 @@ TEST_F(ProfileActionTest, ShouldNotProfileIfNoTids) { TEST_F(ProfileActionTest, ShouldSaturatedMaxProfiles) { // Create configuration that we'll run until it saturates. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); config.set_max_profiles(1); @@ -358,7 +358,7 @@ TEST_F(ProfileActionTest, ShouldSaturatedMaxProfiles) { // interfere with an existing profile the action is running. // The successfully captured profile should be updated only if we captured the profile. 
TEST_F(ProfileActionTest, ShouldUpdateCountersCorrectly) { - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); diff --git a/test/fuzz/main.cc b/test/fuzz/main.cc index b339336620de3..ee1a033052509 100644 --- a/test/fuzz/main.cc +++ b/test/fuzz/main.cc @@ -14,6 +14,7 @@ #include "source/common/common/assert.h" #include "source/common/common/logger.h" +#include "source/common/common/thread.h" #include "test/fuzz/fuzz_runner.h" #include "test/test_common/environment.h" @@ -55,6 +56,7 @@ INSTANTIATE_TEST_SUITE_P(CorpusExamples, FuzzerCorpusTest, testing::ValuesIn(tes int main(int argc, char** argv) { Envoy::TestEnvironment::initializeTestMain(argv[0]); + Envoy::Thread::TestThread test_thread; // Expected usage: [other gtest flags] RELEASE_ASSERT(argc >= 2, ""); diff --git a/test/integration/BUILD b/test/integration/BUILD index 247216e7340d5..91adc8dbd772c 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -148,6 +148,21 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "leds_integration_test", + srcs = ["leds_integration_test.cc"], + deps = [ + ":http_integration_lib", + "//test/config:utility_lib", + "//test/test_common:network_utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], +) + envoy_proto_library( name = "filter_manager_integration_proto", srcs = [":filter_manager_integration_test.proto"], @@ -230,7 +245,6 @@ envoy_cc_test( ], deps = [ ":http_protocol_integration_lib", - "//source/extensions/filters/http/health_check:config", "//test/test_common:utility_lib", ], ) @@ -361,7 +375,6 @@ envoy_cc_test( 
"//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/integration/filters:metadata_stop_all_filter_config_lib", "//test/integration/filters:on_local_reply_filter_config_lib", "//test/integration/filters:request_metadata_filter_config_lib", @@ -464,8 +477,7 @@ envoy_cc_test( ], deps = [ ":http_integration_lib", - "//source/extensions/filters/http/health_check:config", - "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", ], ) @@ -492,7 +504,6 @@ envoy_cc_test_library( ":http_protocol_integration_lib", "//source/common/http:header_map_lib", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/common/http/http2:http2_frame", "//test/integration/filters:continue_after_local_reply_filter_lib", "//test/integration/filters:continue_headers_only_inject_body", @@ -536,7 +547,6 @@ envoy_cc_test( "//source/common/http:header_map_lib", "//source/extensions/access_loggers/grpc:http_config", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/integration/filters:random_pause_filter_lib", "//test/test_common:utility_lib", @@ -557,7 +567,6 @@ envoy_cc_test( "//source/common/stats:histogram_lib", "//source/common/stats:stats_matcher_lib", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/common/stats:stat_test_utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -750,6 +759,7 @@ envoy_cc_test_library( ":fake_upstream_lib", ":integration_tcp_client_lib", ":utility_lib", + "//source/common/common:thread_lib", "//source/common/config:api_version_lib", 
"//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", @@ -942,8 +952,6 @@ envoy_cc_test( ":http_integration_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", - "//source/extensions/filters/http/grpc_http1_bridge:config", - "//source/extensions/filters/http/health_check:config", "//test/integration/filters:clear_route_cache_filter_lib", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/integration/filters:invalid_header_filter_lib", @@ -956,7 +964,6 @@ envoy_cc_test( "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], ) @@ -969,15 +976,9 @@ envoy_cc_test( deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", - "//source/extensions/internal_redirect/allow_listed_routes:config", - "//source/extensions/internal_redirect/previous_routes:config", - "//source/extensions/internal_redirect/safe_cross_scheme:config", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", ], ) @@ -1330,12 +1331,14 @@ envoy_cc_test( envoy_cc_test( name = "tcp_tunneling_integration_test", + size = "large", srcs = [ "tcp_tunneling_integration_test.cc", ], data = [ "//test/config/integration/certs", ], + shard_count = 3, deps = [ ":http_integration_lib", ":http_protocol_integration_lib", @@ -1443,6 +1446,7 @@ envoy_cc_test( deps = 
[ ":http_integration_lib", ":http_protocol_integration_lib", + "//source/extensions/access_loggers/grpc:http_config", "//source/extensions/filters/listener/tls_inspector:config", "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", "//source/extensions/filters/network/tcp_proxy:config", @@ -1637,11 +1641,12 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/network:connection_lib", "//source/common/network:utility_lib", - "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/network/tcp_proxy:config", "//test/common/grpc:grpc_client_integration_lib", "//test/config:v2_link_hacks", "//test/integration/filters:address_restore_listener_filter_lib", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", @@ -1740,6 +1745,7 @@ envoy_cc_test( "//test/common/http/http2:http2_frame", "//test/config:v2_link_hacks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -1834,3 +1840,16 @@ envoy_cc_test( "@envoy_api//envoy/extensions/http/original_ip_detection/custom_header/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "weighted_cluster_integration_test", + srcs = ["weighted_cluster_integration_test.cc"], + deps = [ + ":http_integration_lib", + ":integration_lib", + "//test/integration/filters:repick_cluster_filter_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index 53716bfa5e7df..c1758c067006d 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -60,6 +60,26 @@ 
AdsIntegrationTest::buildTlsClusterLoadAssignment(const std::string& name) { name, Network::Test::getLoopbackAddressString(ipVersion()), 8443); } +envoy::config::endpoint::v3::ClusterLoadAssignment +AdsIntegrationTest::buildClusterLoadAssignmentWithLeds(const std::string& name, + const std::string& collection_name) { + return ConfigHelper::buildClusterLoadAssignmentWithLeds(name, collection_name); +} + +envoy::service::discovery::v3::Resource +AdsIntegrationTest::buildLbEndpointResource(const std::string& lb_endpoint_resource_name, + const std::string& version) { + envoy::service::discovery::v3::Resource resource; + resource.set_name(lb_endpoint_resource_name); + resource.set_version(version); + + envoy::config::endpoint::v3::LbEndpoint lb_endpoint = + ConfigHelper::buildLbEndpoint(Network::Test::getLoopbackAddressString(ipVersion()), + fake_upstreams_[0]->localAddress()->ip()->port()); + resource.mutable_resource()->PackFrom(lb_endpoint); + return resource; +} + envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index adc0a66ffe023..65f35397aa9ab 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -34,6 +34,12 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::config::endpoint::v3::ClusterLoadAssignment buildTlsClusterLoadAssignment(const std::string& name); + envoy::config::endpoint::v3::ClusterLoadAssignment + buildClusterLoadAssignmentWithLeds(const std::string& name, const std::string& collection_name); + + envoy::service::discovery::v3::Resource + buildLbEndpointResource(const std::string& lb_endpoint_resource_name, const std::string& version); + envoy::config::listener::v3::Listener buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix = 
"ads_test"); diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index f2d0edad3ca84..fe3c1bdc95a5e 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -1656,4 +1656,489 @@ TEST_P(XdsTpAdsIntegrationTest, Basic) { makeSingleRequest(); } +// Basic CDS/EDS/LEDS update that warms and makes active a single cluster. +TEST_P(XdsTpAdsIntegrationTest, BasicWithLeds) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. 
+ const auto leds_resource_prefix = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints_name}, {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, absl::StrCat(leds_resource_prefix, "*"))}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Receive LEDS request, and send 2 endpoints. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix, "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + const auto endpoint1_name = absl::StrCat(leds_resource_prefix, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name = absl::StrCat(leds_resource_prefix, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name, "2"), buildLbEndpointResource(endpoint2_name, "2")}, + {}); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); +} + +// CDS/EDS/LEDS update that warms and makes active a single cluster. While +// waiting for LEDS a new EDS update arrives. 
+TEST_P(XdsTpAdsIntegrationTest, LedsClusterWarmingUpdatingEds) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. + const auto leds_resource_prefix_foo = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + const auto leds_resource_prefix_bar = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/bar-endpoints/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints_name}, {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, + absl::StrCat(leds_resource_prefix_foo, "*"))}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Receive LEDS request, and send an updated EDS response. 
+ EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix_foo, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, + absl::StrCat(leds_resource_prefix_bar, "*"))}, + {}, "2"); + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + // Send the old LEDS response, and ensure it is rejected. + const auto endpoint1_name_foo = + absl::StrCat(leds_resource_prefix_foo, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_foo = + absl::StrCat(leds_resource_prefix_foo, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse(Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_foo, "2"), + buildLbEndpointResource(endpoint2_name_foo, "2")}, + {}); + + // Receive the new LEDS request and EDS ack. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix_bar, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {absl::StrCat(leds_resource_prefix_foo, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")})); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {}, {}, {})); + + // Send the new LEDS response + const auto endpoint1_name_bar = + absl::StrCat(leds_resource_prefix_bar, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_bar = + absl::StrCat(leds_resource_prefix_bar, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse(Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_bar, "3"), + buildLbEndpointResource(endpoint2_name_bar, "3")}, + {}); + + // The cluster should be warmed up. 
+ test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "3", {}, {}, {})); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); +} + +// CDS/EDS/LEDS update that warms and makes active a single cluster. While +// waiting for LEDS a new CDS update arrives. +TEST_P(XdsTpAdsIntegrationTest, LedsClusterWarmingUpdatingCds) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster1_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "cluster1?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster1_resource = buildCluster(cluster1_name); + const std::string endpoints1_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/cluster1"; + cluster1_resource.mutable_eds_cluster_config()->set_service_name(endpoints1_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster1_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. 
+ const auto leds_resource_prefix1 = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints1/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints1_name}, {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints1_name, + absl::StrCat(leds_resource_prefix1, "*"))}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Receive LEDS request, and send an updated CDS response (removing previous + // cluster and adding a new one). + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix1, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + const std::string cluster2_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "cluster2?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster2_resource = buildCluster(cluster2_name); + const std::string endpoints2_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/cluster2"; + cluster2_resource.mutable_eds_cluster_config()->set_service_name(endpoints2_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster2_resource}, + {cluster1_name}, "2"); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + // Send the old LEDS response. 
+ const auto endpoint1_name_cluster1 = absl::StrCat( + leds_resource_prefix1, "endpoint_0", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_cluster1 = absl::StrCat( + leds_resource_prefix1, "endpoint_1", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_cluster1, "2"), + buildLbEndpointResource(endpoint2_name_cluster1, "2")}, + {}); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. + const auto leds_resource_prefix2 = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints2/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints2_name}, {endpoints1_name})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints2_name, + absl::StrCat(leds_resource_prefix2, "*"))}, + {}, "2"); + + // The server should remove interest in the old LEDS. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, {}, + {absl::StrCat(leds_resource_prefix1, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")})); + + // Receive CDS ack. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "2", {}, {}, {})); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); + + // Receive the new LEDS request and EDS ack. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix2, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {}, {}, {})); + + // Send 2 endpoints using LEDS. 
+ const auto endpoint1_name_cluster2 = absl::StrCat( + leds_resource_prefix2, "endpoint_0", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_cluster2 = absl::StrCat( + leds_resource_prefix2, "endpoint_1", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_cluster2, "2"), + buildLbEndpointResource(endpoint2_name_cluster2, "2")}, + {}); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); +} + +// Timeout on LEDS update activates the cluster. +TEST_P(XdsTpAdsIntegrationTest, LedsTimeout) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. 
+ EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints_name}, {})); + const auto leds_resource_prefix = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + + auto cla_with_leds = + buildClusterLoadAssignmentWithLeds(endpoints_name, absl::StrCat(leds_resource_prefix, "*")); + // Set a short timeout for the initial fetch. + cla_with_leds.mutable_endpoints(0) + ->mutable_leds_cluster_locality_config() + ->mutable_leds_config() + ->mutable_initial_fetch_timeout() + ->set_nanos(100 * 1000 * 1000); + sendDiscoveryResponse( + eds_type_url, {}, {cla_with_leds}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + // Receive LEDS request, and wait for the initial fetch timeout. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix, "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + + // The cluster should be warming. Wait until initial fetch timeout. 
+ test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + test_server_->waitForCounterEq( + "cluster.xdstp_//test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name.leds.init_fetch_timeout", + 1); + + // After timeout the cluster should be active, not warming. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); +} + +// Modifying a cluster to alternate use of EDS with and without LEDS. +TEST_P(XdsTpAdsIntegrationTest, EdsAlternatingLedsUsage) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. 
+ EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, + // that doesn't use LEDS. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", {}, + {endpoints_name}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {}, + {buildClusterLoadAssignment(endpoints_name)}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "1", {}, {}, {})); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + const std::string route_name_0 = + "xdstp://test/envoy.config.route.v3.RouteConfiguration/route_config_0"; + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {}, + {buildListener("xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "bar?xds.node.cluster=cluster_name&xds.node.id=node_name", + route_name_0)}, + {}, "1"); + + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", {}, + {route_name_0}, {})); + sendDiscoveryResponse( + 
Config::TypeUrl::get().RouteConfiguration, {}, {buildRouteConfig(route_name_0, cluster_name)}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", {}, {}, {})); + + test_server_->waitForCounterEq("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + // Send a new EDS update that uses LEDS. + const auto leds_resource_prefix = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, absl::StrCat(leds_resource_prefix, "*"))}, + {}, "2"); + + // Receive LEDS request. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix, "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + + // Make sure that traffic can still be sent to the endpoint (still using the + // EDS without LEDS). + makeSingleRequest(); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {}, {}, {})); + + // Send LEDS response with 2 endpoints. + const auto endpoint1_name = absl::StrCat(leds_resource_prefix, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name = absl::StrCat(leds_resource_prefix, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name, "1"), buildLbEndpointResource(endpoint2_name, "1")}, + {}); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "1", {}, {}, {})); + + // Make sure that traffic can still be sent to the endpoint (now using the + // EDS with LEDS). + makeSingleRequest(); + + // Send a new EDS update that doesn't use LEDS. 
+ sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {}, + {buildClusterLoadAssignment(endpoints_name)}, {}, "3"); + + // The server should remove interest in the old LEDS. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, {}, + {absl::StrCat(leds_resource_prefix, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")})); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "3", {}, {}, {})); + + // Remove the LEDS endpoints. + sendExplicitResourcesDeltaDiscoveryResponse(Config::TypeUrl::get().LbEndpoint, {}, + {endpoint1_name, endpoint2_name}); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "3", {}, {}, {})); + + // Make sure that traffic can still be sent to the endpoint (now using the + // EDS without LEDS). + makeSingleRequest(); +} + } // namespace Envoy diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index 5b870e5e5cf47..8ac28e51f53c6 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ -17,7 +17,6 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" -#include "source/common/common/thread.h" #include "source/common/config/api_version.h" #include "source/common/event/libevent.h" #include "source/common/network/utility.h" @@ -94,7 +93,6 @@ Network::ClientConnectionPtr BaseIntegrationTest::makeClientConnectionWithOption } void BaseIntegrationTest::initialize() { - Thread::MainThread::initTestThread(); RELEASE_ASSERT(!initialized_, ""); RELEASE_ASSERT(Event::Libevent::Global::initialized(), ""); initialized_ = true; @@ -332,10 +330,12 @@ std::string getListenerDetails(Envoy::Server::Instance& server) { void BaseIntegrationTest::createGeneratedApiTestServer( const std::string& bootstrap_path, const std::vector& port_names, Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { + test_server_ = 
IntegrationTestServer::create( - bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, - timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config, - concurrency_, drain_time_, drain_strategy_, proxy_buffer_factory_, use_real_stats_); + bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, + deterministic_value_, timeSystem(), *api_, defer_listener_finalization_, process_object_, + validator_config, concurrency_, drain_time_, drain_strategy_, proxy_buffer_factory_, + use_real_stats_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { @@ -572,6 +572,21 @@ AssertionResult BaseIntegrationTest::waitForPortAvailable(uint32_t port, return AssertionFailure() << "Timeout waiting for port availability"; } +envoy::service::discovery::v3::DeltaDiscoveryResponse +BaseIntegrationTest::createExplicitResourcesDeltaDiscoveryResponse( + const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed) { + envoy::service::discovery::v3::DeltaDiscoveryResponse response; + response.set_system_version_info("system_version_info_this_is_a_test"); + response.set_type_url(type_url); + *response.mutable_resources() = {added_or_updated.begin(), added_or_updated.end()}; + *response.mutable_removed_resources() = {removed.begin(), removed.end()}; + static int next_nonce_counter = 0; + response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); + return response; +} + AssertionResult BaseIntegrationTest::compareDeltaDiscoveryRequest( const std::string& expected_type_url, const std::vector& expected_resource_subscriptions, diff --git a/test/integration/base_integration_test.h b/test/integration/base_integration_test.h index d629b0572ff35..e935ef74d30a9 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include 
#include @@ -8,6 +9,7 @@ #include "envoy/server/process_context.h" #include "envoy/service/discovery/v3/discovery.pb.h" +#include "source/common/common/thread.h" #include "source/common/config/api_version.h" #include "source/extensions/transport_sockets/tls/context_manager_impl.h" @@ -69,7 +71,7 @@ class BaseIntegrationTest : protected Logger::Loggable { // configuration generated in ConfigHelper::finalize. void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value. - void setDeterministic() { deterministic_ = true; } + void setDeterministicValue(uint64_t value = 0) { deterministic_value_ = value; } Http::CodecType upstreamProtocol() const { return upstream_config_.upstream_protocol_; } @@ -204,29 +206,41 @@ class BaseIntegrationTest : protected Logger::Loggable { stream->sendGrpcMessage(response); } + // Sends a DeltaDiscoveryResponse with a given list of added resources. + // Note that the resources are expected to be of the same type, and match type_url. 
+ void sendExplicitResourcesDeltaDiscoveryResponse( + const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed) { + xds_stream_->sendGrpcMessage( + createExplicitResourcesDeltaDiscoveryResponse(type_url, added_or_updated, removed)); + } + + envoy::service::discovery::v3::DeltaDiscoveryResponse + createExplicitResourcesDeltaDiscoveryResponse( + const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed); + template envoy::service::discovery::v3::DeltaDiscoveryResponse createDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, const std::vector& aliases) { - envoy::service::discovery::v3::DeltaDiscoveryResponse response; - response.set_system_version_info("system_version_info_this_is_a_test"); - response.set_type_url(type_url); + std::vector resources; for (const auto& message : added_or_updated) { - auto* resource = response.add_resources(); + envoy::service::discovery::v3::Resource resource; ProtobufWkt::Any temp_any; temp_any.PackFrom(message); - resource->mutable_resource()->PackFrom(message); - resource->set_name(intResourceName(message)); - resource->set_version(version); + resource.mutable_resource()->PackFrom(message); + resource.set_name(intResourceName(message)); + resource.set_version(version); for (const auto& alias : aliases) { - resource->add_aliases(alias); + resource.add_aliases(alias); } + resources.emplace_back(resource); } - *response.mutable_removed_resources() = {removed.begin(), removed.end()}; - static int next_nonce_counter = 0; - response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); - return response; + return createExplicitResourcesDeltaDiscoveryResponse(type_url, resources, removed); } private: @@ -253,8 +267,10 @@ class BaseIntegrationTest : protected Logger::Loggable { * * @param port the port to connect to. * @param raw_http the data to send. 
- * @param response the response data will be sent here - * @param if the connection should be terminated once '\r\n\r\n' has been read. + * @param response the response data will be sent here. + * @param disconnect_after_headers_complete if the connection should be terminated once "\r\n\r\n" + * has been read. + * @param transport_socket the transport socket of the created client connection. **/ void sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete = false, @@ -348,6 +364,8 @@ class BaseIntegrationTest : protected Logger::Loggable { std::unique_ptr upstream_stats_store_; + Thread::TestThread test_thread_; + // Make sure the test server will be torn down after any fake client. // The test server owns the runtime, which is often accessed by client and // fake upstream codecs and must outlast them. @@ -415,8 +433,9 @@ class BaseIntegrationTest : protected Logger::Loggable { // This does nothing if autonomous_upstream_ is false bool autonomous_allow_incomplete_streams_{false}; - // True if test will use a fixed RNG value. - bool deterministic_{}; + // If this member is not empty, the test will use a fixed RNG value specified + // by it. + absl::optional deterministic_value_{}; // Set true when your test will itself take care of ensuring listeners are up, and registering // them in the port_map_. diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 3da9db2c5e897..bbffa27e9569c 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -175,6 +175,40 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { cleanupUpstreamAndDownstream(); } +// Test the fast addition and removal of clusters when they use ThreadAwareLb. +TEST_P(CdsIntegrationTest, CdsClusterWithThreadAwareLbCycleUpDownUp) { + // Calls our initialize(), which includes establishing a listener, route, and cluster. 
+ testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + test_server_->waitForCounterGe("cluster_manager.cluster_added", 1); + + // Tell Envoy that cluster_1 is gone. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, {}, {}, + {ClusterName1}, "42"); + // Make sure that Envoy's ClusterManager has made use of the DiscoveryResponse that says cluster_1 + // is gone. + test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); + + // Update cluster1_ to use MAGLEV load balancer policy. + cluster1_ = ConfigHelper::buildStaticCluster( + ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(), + Network::Test::getLoopbackAddressString(ipVersion()), "MAGLEV"); + + // Cyclically add and remove cluster with ThreadAwareLb. + for (int i = 42; i < 142; i += 2) { + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Cluster, absl::StrCat(i), {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster1_}, {cluster1_}, {}, absl::StrCat(i + 1)); + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Cluster, absl::StrCat(i + 1), {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {}, {}, {ClusterName1}, absl::StrCat(i + 2)); + } + + cleanupUpstreamAndDownstream(); +} + // Tests adding a cluster, adding another, then removing the first. TEST_P(CdsIntegrationTest, TwoClusters) { // Calls our initialize(), which includes establishing a listener, route, and cluster. 
diff --git a/test/integration/clusters/custom_static_cluster.h b/test/integration/clusters/custom_static_cluster.h index 5570338047634..6daeb9220eda4 100644 --- a/test/integration/clusters/custom_static_cluster.h +++ b/test/integration/clusters/custom_static_cluster.h @@ -29,7 +29,7 @@ class CustomStaticCluster : public Upstream::ClusterImplBase { Stats::ScopePtr&& stats_scope, bool added_via_api, uint32_t priority, std::string address, uint32_t port) : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), + factory_context.mainThreadDispatcher().timeSource()), priority_(priority), address_(std::move(address)), port_(port), host_(makeHost()) {} InitializePhase initializePhase() const override { return InitializePhase::Primary; } diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc index 65b84f28bf0e9..f0abdb4dbcbdc 100644 --- a/test/integration/drain_close_integration_test.cc +++ b/test/integration/drain_close_integration_test.cc @@ -5,13 +5,12 @@ namespace { using DrainCloseIntegrationTest = HttpProtocolIntegrationTest; -// Add a health check filter and verify correct behavior when draining. TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { + autonomous_upstream_ = true; // The probability of drain close increases over time. With a high timeout, // the probability will be very low, but the rapid retries prevent this from // increasing total test time. 
drain_time_ = std::chrono::seconds(100); - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); absl::Notification drain_sequence_started; @@ -43,9 +42,9 @@ TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { } TEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) { + autonomous_upstream_ = true; drain_strategy_ = Server::DrainStrategy::Immediate; drain_time_ = std::chrono::seconds(100); - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); absl::Notification drain_sequence_started; diff --git a/test/integration/fake_resource_monitor.cc b/test/integration/fake_resource_monitor.cc index 9dc8e4b0060ab..85a11084f6e14 100644 --- a/test/integration/fake_resource_monitor.cc +++ b/test/integration/fake_resource_monitor.cc @@ -16,7 +16,7 @@ void FakeResourceMonitorFactory::onMonitorDestroyed(FakeResourceMonitor* monitor } Server::ResourceMonitorPtr FakeResourceMonitorFactory::createResourceMonitor( const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) { - auto monitor = std::make_unique(context.dispatcher(), *this); + auto monitor = std::make_unique(context.mainThreadDispatcher(), *this); monitor_ = monitor.get(); return monitor; } diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index a64ba2dab4b15..54a5f56f531e5 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -570,6 +570,7 @@ struct FakeUpstreamConfig { // Legacy options which are always set. 
http2_options_.set_allow_connect(true); http2_options_.set_allow_metadata(true); + http3_options_.set_allow_extended_connect(true); } Event::TestTimeSystem& time_system_; diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index c8e796b6c89a0..79e08f2533a4b 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -599,3 +599,22 @@ envoy_cc_test_library( "//test/extensions/filters/http/common:empty_http_filter_config_lib", ], ) + +envoy_cc_test_library( + name = "repick_cluster_filter_lib", + srcs = [ + "repick_cluster_filter.cc", + ], + hdrs = [ + "repick_cluster_filter.h", + ], + deps = [ + ":common_lib", + "//envoy/http:filter_interface", + "//envoy/registry", + "//envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + "@com_google_absl//absl/strings:str_format", + ], +) diff --git a/test/integration/filters/address_restore_listener_filter.cc b/test/integration/filters/address_restore_listener_filter.cc index 84cd33d2c14e7..ed1605924d8dc 100644 --- a/test/integration/filters/address_restore_listener_filter.cc +++ b/test/integration/filters/address_restore_listener_filter.cc @@ -10,14 +10,22 @@ namespace Envoy { // The FakeOriginalDstListenerFilter restore desired local address without the dependency of OS. +// Ipv6 and Ipv4 addresses are restored to the corresponding loopback ip address and port 80. 
class FakeOriginalDstListenerFilter : public Network::ListenerFilter { public: // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override { FANCY_LOG(debug, "in FakeOriginalDstListenerFilter::onAccept"); Network::ConnectionSocket& socket = cb.socket(); - socket.connectionInfoProvider().restoreLocalAddress( - std::make_shared("127.0.0.2", 80)); + auto local_address = socket.connectionInfoProvider().localAddress(); + if (local_address != nullptr && + local_address->ip()->version() == Network::Address::IpVersion::v6) { + socket.connectionInfoProvider().restoreLocalAddress( + std::make_shared("::1", 80)); + } else { + socket.connectionInfoProvider().restoreLocalAddress( + std::make_shared("127.0.0.1", 80)); + } FANCY_LOG(debug, "current local socket address is {} restored = {}", socket.connectionInfoProvider().localAddress()->asString(), socket.connectionInfoProvider().localAddressRestored()); diff --git a/test/integration/filters/repick_cluster_filter.cc b/test/integration/filters/repick_cluster_filter.cc new file mode 100644 index 0000000000000..40b5e25bf88c6 --- /dev/null +++ b/test/integration/filters/repick_cluster_filter.cc @@ -0,0 +1,48 @@ +#include "test/integration/filters/repick_cluster_filter.h" + +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" + +#include "absl/strings/str_format.h" + +namespace Envoy { +namespace RepickClusterFilter { + +// A test filter that modifies the request header (i.e. map the cluster header +// to cluster name), clear the route cache. 
+class RepickClusterFilter : public Http::PassThroughFilter { +public: + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& request_header, bool) override { + request_header.addCopy(Envoy::Http::LowerCaseString(ClusterHeaderName), ClusterName); + decoder_callbacks_->clearRouteCache(); + return Http::FilterHeadersStatus::Continue; + } +}; + +class RepickClusterFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig { +public: + RepickClusterFilterConfig() : EmptyHttpFilterConfig("repick-cluster-filter") {} + + Http::FilterFactoryCb createFilter(const std::string&, + Server::Configuration::FactoryContext&) override { + return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter( + std::make_shared<::Envoy::RepickClusterFilter::RepickClusterFilter>()); + }; + } +}; + +// Perform static registration +static Registry::RegisterFactory + register_; + +} // namespace RepickClusterFilter +} // namespace Envoy diff --git a/test/integration/filters/repick_cluster_filter.h b/test/integration/filters/repick_cluster_filter.h new file mode 100644 index 0000000000000..e0e5741572581 --- /dev/null +++ b/test/integration/filters/repick_cluster_filter.h @@ -0,0 +1,15 @@ +#pragma once + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace RepickClusterFilter { + +// The cluster names and cluster headers here need to be correlated correctly with +// weighted_cluster_integration_test or any other end users, to make sure that header modifications +// have been done on the correct target. So they are declared and defined here. 
+inline constexpr absl::string_view ClusterName = "cluster_1"; +inline constexpr absl::string_view ClusterHeaderName = "cluster_header_1"; + +} // namespace RepickClusterFilter +} // namespace Envoy diff --git a/test/integration/health_check_integration_test.cc b/test/integration/health_check_integration_test.cc index dcd6efdee9156..ed44be1f05954 100644 --- a/test/integration/health_check_integration_test.cc +++ b/test/integration/health_check_integration_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/type/v3/range.pb.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/common/http/http2/http2_frame.h" @@ -170,7 +171,8 @@ class HttpHealthCheckIntegrationTestBase // Adds a HTTP active health check specifier to the given cluster, and waits for the first health // check probe to be received. - void initHttpHealthCheck(uint32_t cluster_idx) { + void initHttpHealthCheck(uint32_t cluster_idx, int unhealthy_threshold = 1, + std::unique_ptr retriable_range = nullptr) { const envoy::type::v3::CodecClientType codec_client_type = (Http::CodecType::HTTP1 == upstream_protocol_) ? envoy::type::v3::CodecClientType::HTTP1 : envoy::type::v3::CodecClientType::HTTP2; @@ -179,6 +181,12 @@ class HttpHealthCheckIntegrationTestBase auto* health_check = addHealthCheck(cluster_data.cluster_); health_check->mutable_http_health_check()->set_path("/healthcheck"); health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type); + health_check->mutable_unhealthy_threshold()->set_value(unhealthy_threshold); + if (retriable_range != nullptr) { + auto* range = health_check->mutable_http_health_check()->add_retriable_statuses(); + range->set_start(retriable_range->start()); + range->set_end(retriable_range->end()); + } // Introduce the cluster using compareDiscoveryRequest / sendDiscoveryResponse. 
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); @@ -246,6 +254,117 @@ TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointUnhealthyHttp) { EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); } +// Tests that a retriable status response does not mark endpoint unhealthy until threshold is +// reached +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointUnhealthyThresholdHttp) { + const uint32_t cluster_idx = 0; + initialize(); + auto retriable_range = std::make_unique(); + retriable_range->set_start(400); + retriable_range->set_end(401); + initHttpHealthCheck(cluster_idx, 2, std::move(retriable_range)); + + // Responds with healthy status. + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 1); + test_server_->waitForCounterEq("cluster.cluster_1.health_check.success", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); + + // Wait until the next attempt is made. 
+ test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 2); + + // Respond with retriable status + ASSERT_TRUE(clusters_[cluster_idx].host_fake_connection_->waitForNewStream( + *dispatcher_, clusters_[cluster_idx].host_stream_)); + ASSERT_TRUE(clusters_[cluster_idx].host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getHostValue(), + clusters_[cluster_idx].name_); + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "400"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for second health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.failure", 1); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_healthy")->value()); + + // Wait until the next attempt is made. 
+ test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 3); + + // Respond with retriable status a second time, matching unhealthy threshold + ASSERT_TRUE(clusters_[cluster_idx].host_fake_connection_->waitForNewStream( + *dispatcher_, clusters_[cluster_idx].host_stream_)); + ASSERT_TRUE(clusters_[cluster_idx].host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getHostValue(), + clusters_[cluster_idx].name_); + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "400"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for third health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.failure", 2); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 0); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); + + // Wait until the next attempt is made. + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 4); + + // Respond with healthy status again. 
+ ASSERT_TRUE(clusters_[cluster_idx].host_fake_connection_->waitForNewStream( + *dispatcher_, clusters_[cluster_idx].host_stream_)); + ASSERT_TRUE(clusters_[cluster_idx].host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getHostValue(), + clusters_[cluster_idx].name_); + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for fourth health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.success", 2); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); +} + +// Tests that expected statuses takes precedence over retriable statuses +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointExpectedAndRetriablePrecedence) { + const uint32_t cluster_idx = 0; + initialize(); + auto retriable_range = std::make_unique(); + retriable_range->set_start(200); + retriable_range->set_end(201); + initHttpHealthCheck(cluster_idx, 2, std::move(retriable_range)); + + // Responds with healthy status. 
+ clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 1); + test_server_->waitForCounterEq("cluster.cluster_1.health_check.success", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); +} + // Verify that immediate health check fail causes cluster exclusion. TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointImmediateHealthcheckFailHttp) { const uint32_t cluster_idx = 0; diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 4403d39b23171..dac1ce06c78db 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -257,6 +257,7 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( } else { cluster->http3_options_ = ConfigHelper::http2ToHttp3ProtocolOptions( http2_options.value(), quic::kStreamReceiveWindowLimit); + cluster->http3_options_.set_allow_extended_connect(true); #endif } cluster->http2_options_ = http2_options.value(); @@ -381,8 +382,8 @@ ConfigHelper::ConfigModifierFunction HttpIntegrationTest::setEnableUpstreamTrail IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, - const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_size, - int upstream_index, std::chrono::milliseconds timeout) { + const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, + const std::vector& upstream_indices, std::chrono::milliseconds timeout) { ASSERT(codec_client_ != nullptr); // Send the request to Envoy. 
IntegrationStreamDecoderPtr response; @@ -391,18 +392,27 @@ IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( } else { response = codec_client_->makeHeaderOnlyRequest(request_headers); } - waitForNextUpstreamRequest(upstream_index, timeout); + waitForNextUpstreamRequest(upstream_indices, timeout); // Send response headers, and end_stream if there is no response body. - upstream_request_->encodeHeaders(response_headers, response_size == 0); + upstream_request_->encodeHeaders(response_headers, response_body_size == 0); // Send any response data, with end_stream true. - if (response_size) { - upstream_request_->encodeData(response_size, true); + if (response_body_size) { + upstream_request_->encodeData(response_body_size, true); } // Wait for the response to be read by the codec client. RELEASE_ASSERT(response->waitForEndStream(timeout), "unexpected timeout"); return response; } +IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( + const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, + const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, + uint64_t upstream_index, std::chrono::milliseconds timeout) { + return sendRequestAndWaitForResponse(request_headers, request_body_size, response_headers, + response_body_size, std::vector{upstream_index}, + timeout); +} + void HttpIntegrationTest::cleanupUpstreamAndDownstream() { // Close the upstream connection first. 
If there's an outstanding request, // closing the client may result in a FIN being sent upstream, and FakeConnectionBase::close @@ -1404,6 +1414,63 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecType admin_request_type) { } } +void HttpIntegrationTest::simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes, + uint32_t response1_bytes, uint32_t response2_bytes) { + FakeStreamPtr upstream_request1; + FakeStreamPtr upstream_request2; + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Start request 1 + auto encoder_decoder1 = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + Http::RequestEncoder* encoder1 = &encoder_decoder1.first; + auto response1 = std::move(encoder_decoder1.second); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request1)); + + // Start request 2 + auto encoder_decoder2 = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + Http::RequestEncoder* encoder2 = &encoder_decoder2.first; + auto response2 = std::move(encoder_decoder2.second); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2)); + + // Finish request 1 + codec_client_->sendData(*encoder1, request1_bytes, true); + ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_)); + + // Finish request 2 + codec_client_->sendData(*encoder2, request2_bytes, true); + ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_)); + + // Respond to request 2 + upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request2->encodeData(response2_bytes, true); + ASSERT_TRUE(response2->waitForEndStream()); + 
EXPECT_TRUE(upstream_request2->complete()); + EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); + EXPECT_TRUE(response2->complete()); + EXPECT_EQ("200", response2->headers().getStatusValue()); + EXPECT_EQ(response2_bytes, response2->body().size()); + + // Respond to request 1 + upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request1->encodeData(response1_bytes, true); + ASSERT_TRUE(response1->waitForEndStream()); + EXPECT_TRUE(upstream_request1->complete()); + EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); + EXPECT_TRUE(response1->complete()); + EXPECT_EQ("200", response1->headers().getStatusValue()); + EXPECT_EQ(response1_bytes, response1->body().size()); +} + std::string HttpIntegrationTest::downstreamProtocolStatsRoot() const { switch (downstreamProtocol()) { case Http::CodecClient::Type::HTTP1: diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index a872292747a60..6b01dd1b708d4 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -89,13 +89,24 @@ using IntegrationCodecClientPtr = std::unique_ptr; */ class HttpIntegrationTest : public BaseIntegrationTest { public: + HttpIntegrationTest(Http::CodecType downstream_protocol, Network::Address::IpVersion version) + : HttpIntegrationTest( + downstream_protocol, version, + ConfigHelper::httpProxyConfig(/*downstream_use_quic=*/downstream_protocol == + Http::CodecType::HTTP3)) {} HttpIntegrationTest(Http::CodecType downstream_protocol, Network::Address::IpVersion version, - const std::string& config = ConfigHelper::httpProxyConfig()); + const std::string& config); HttpIntegrationTest(Http::CodecType downstream_protocol, const InstanceConstSharedPtrFn& upstream_address_fn, - Network::Address::IpVersion version, - const std::string& config = ConfigHelper::httpProxyConfig()); + Network::Address::IpVersion version) + : HttpIntegrationTest( + downstream_protocol, 
upstream_address_fn, version, + ConfigHelper::httpProxyConfig(/*downstream_use_quic=*/downstream_protocol == + Http::CodecType::HTTP3)) {} + HttpIntegrationTest(Http::CodecType downstream_protocol, + const InstanceConstSharedPtrFn& upstream_address_fn, + Network::Address::IpVersion version, const std::string& config); ~HttpIntegrationTest() override; void initialize() override; @@ -136,7 +147,13 @@ class HttpIntegrationTest : public BaseIntegrationTest { IntegrationStreamDecoderPtr sendRequestAndWaitForResponse( const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, - int upstream_index = 0, std::chrono::milliseconds time = TestUtility::DefaultTimeout); + uint64_t upstream_index = 0, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); + + IntegrationStreamDecoderPtr sendRequestAndWaitForResponse( + const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, + const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, + const std::vector& upstream_indices, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); // Wait for the end of stream on the next upstream stream on any of the provided fake upstreams. // Sets fake_upstream_connection_ to the connection and upstream_request_ to stream. @@ -228,6 +245,8 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testEnvoyProxying1xx(bool continue_before_upstream_complete = false, bool with_encoder_filter = false, bool with_multiple_1xx_headers = false); + void simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes, + uint32_t response1_bytes, uint32_t response2_bytes); // HTTP/2 client tests. 
void testDownstreamResetBeforeResponseComplete(); diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index d32ec0b1a94f7..2158305e8b6f2 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -30,8 +30,7 @@ class HttpSubsetLbIntegrationTest auto policy = static_cast(i); - if (policy == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB || - policy == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED || + if (policy == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED || policy == envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) { continue; } diff --git a/test/integration/http_typed_per_filter_config_test.cc b/test/integration/http_typed_per_filter_config_test.cc index 8a2ac7d56b0b9..c30c4fab029e6 100644 --- a/test/integration/http_typed_per_filter_config_test.cc +++ b/test/integration/http_typed_per_filter_config_test.cc @@ -1,5 +1,4 @@ -#include "envoy/extensions/filters/http/health_check/v3/health_check.pb.h" - +#include "test/integration/filters/set_response_code_filter_config.pb.h" #include "test/integration/http_integration.h" #include "gtest/gtest.h" @@ -17,24 +16,23 @@ TEST_F(HTTPTypedPerFilterConfigTest, RejectUnsupportedTypedPerFilterConfig) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { - envoy::extensions::filters::http::health_check::v3::HealthCheck health_check; - health_check.mutable_pass_through_mode()->set_value(false); + test::integration::filters::SetResponseCodeFilterConfig response_code; + response_code.set_code(403); - // The http health_check filter doesn't support per filter config. So specify one - // and expect the exception will be raised. 
auto* virtual_host = hcm.mutable_route_config()->mutable_virtual_hosts(0); auto* config = virtual_host->mutable_typed_per_filter_config(); - (*config)["envoy.filters.http.health_check"].PackFrom(health_check); + (*config)["set-response-code-filter"].PackFrom(response_code); auto* filter = hcm.mutable_http_filters()->Add(); - filter->set_name("envoy.filters.http.health_check"); - filter->mutable_typed_config()->PackFrom(health_check); + filter->set_name("set-response-code-filter"); + filter->mutable_typed_config()->PackFrom(response_code); // keep router the last auto size = hcm.http_filters_size(); hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); }); - EXPECT_DEATH(initialize(), "The filter envoy.filters.http.health_check doesn't support virtual " - "host-specific configurations"); + EXPECT_DEATH( + initialize(), + "The filter set-response-code-filter doesn't support virtual host-specific configurations"); } TEST_F(HTTPTypedPerFilterConfigTest, RejectUnknownHttpFilterInTypedPerFilterConfig) { diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index 46cbe957e6ae5..6d246bf90e5b2 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -35,6 +35,14 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { hcm.mutable_request_timeout()->set_seconds(0); hcm.mutable_request_timeout()->set_nanos(RequestTimeoutMs * 1000 * 1000); } + if (enable_per_try_idle_timeout_) { + auto* route_config = hcm.mutable_route_config(); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + auto* route = virtual_host->mutable_routes(0)->mutable_route(); + auto* retry_policy = route->mutable_retry_policy(); + retry_policy->mutable_per_try_idle_timeout()->set_seconds(0); + retry_policy->mutable_per_try_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + } // For validating encode100ContinueHeaders() timer kick. 
hcm.set_proxy_100_continue(true); @@ -62,6 +70,26 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { return response; } + IntegrationStreamDecoderPtr setupPerTryIdleTimeoutTest(const char* method = "GET") { + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", method}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + AssertionResult result = + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); + RELEASE_ASSERT(result, result.message()); + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForHeadersComplete(); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + return response; + } + void sleep() { test_time_.timeSystem().advanceTimeWait(std::chrono::milliseconds(IdleTimeoutMs / 2)); } @@ -86,6 +114,7 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { bool enable_global_idle_timeout_{false}; bool enable_per_stream_idle_timeout_{false}; bool enable_request_timeout_{false}; + bool enable_per_try_idle_timeout_{false}; DangerousDeprecatedTestTime test_time_; }; @@ -275,6 +304,23 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterUpstreamHeaders) { EXPECT_EQ("", response->body()); } +// Per-try idle timeout after upstream headers have been sent. 
+TEST_P(IdleTimeoutIntegrationTest, PerTryIdleTimeoutAfterUpstreamHeaders) { + enable_per_try_idle_timeout_ = true; + auto response = setupPerTryIdleTimeoutTest(); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + + waitForTimeout(*response); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_per_try_idle_timeout", 1); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_FALSE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("", response->body()); +} + // Per-stream idle timeout after a sequence of header/data events. TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) { enable_per_stream_idle_timeout_ = true; diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 6fdc3bd7467cd..d87aa4b399987 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -34,48 +34,6 @@ INSTANTIATE_TEST_SUITE_P(Protocols, IntegrationAdminTest, {Http::CodecType::HTTP1})), HttpProtocolIntegrationTest::protocolTestParamsToString); -TEST_P(IntegrationAdminTest, HealthCheck) { - initialize(); - - BufferingStreamDecoderPtr response; - EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); - EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); - EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); -} - -TEST_P(IntegrationAdminTest, HealthCheckWithoutServerStats) { - envoy::config::metrics::v3::StatsMatcher stats_matcher; - stats_matcher.mutable_exclusion_list()->add_patterns()->set_prefix("server."); - initialize(stats_matcher); - - BufferingStreamDecoderPtr response; - EXPECT_EQ("200", request("http", "POST", 
"/healthcheck", response)); - EXPECT_EQ("200", request("admin", "GET", "/stats", response)); - EXPECT_THAT(response->body(), Not(HasSubstr("server."))); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); - EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); - EXPECT_EQ("200", request("admin", "GET", "/stats", response)); - EXPECT_THAT(response->body(), Not(HasSubstr("server."))); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); - EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); - EXPECT_EQ("200", request("admin", "GET", "/stats", response)); - EXPECT_THAT(response->body(), Not(HasSubstr("server."))); -} - -TEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) { - config_helper_.prependFilter(ConfigHelper::defaultBufferFilter()); - initialize(); - - BufferingStreamDecoderPtr response; - EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); -} - TEST_P(IntegrationAdminTest, AdminLogging) { initialize(); diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index b190cef6edbaf..7d036b98a7230 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -15,7 +15,6 @@ namespace Envoy { class IntegrationAdminTest : public HttpProtocolIntegrationTest { public: void initialize() override { - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); config_helper_.addConfigModifier( [](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto& hist_settings = @@ -30,14 +29,6 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { HttpIntegrationTest::initialize(); } - void initialize(envoy::config::metrics::v3::StatsMatcher stats_matcher) { - config_helper_.addConfigModifier( - [stats_matcher](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - *bootstrap.mutable_stats_config()->mutable_stats_matcher() = stats_matcher; - }); - 
initialize(); - } - absl::string_view request(const std::string port_key, const std::string method, const std::string endpoint, BufferingStreamDecoderPtr& response) { response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, endpoint, "", diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 794a49adda065..06cee2599e1b1 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -4,11 +4,11 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/route/v3/route_components.pb.h" -#include "envoy/extensions/filters/http/grpc_http1_bridge/v3/config.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "source/common/http/header_map_impl.h" #include "source/common/http/headers.h" +#include "source/common/network/socket_option_factory.h" #include "source/common/network/socket_option_impl.h" #include "source/common/network/utility.h" #include "source/common/protobuf/utility.h" @@ -20,6 +20,7 @@ #include "test/mocks/http/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" +#include "test/test_common/registry.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -303,15 +304,12 @@ TEST_P(IntegrationTest, RouterDirectResponseEmptyBody) { } TEST_P(IntegrationTest, ConnectionClose) { - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); + autonomous_upstream_ = true; initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = - codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{{":method", "GET"}, - {":path", "/healthcheck"}, - {":authority", "host"}, - {"connection", "close"}}); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/"}, {":authority", "host"}, {"connection", "close"}}); 
ASSERT_TRUE(response->waitForEndStream()); ASSERT_TRUE(codec_client_->waitForDisconnect()); @@ -622,38 +620,6 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 2); } -// Test hitting the bridge filter with too many response bytes to buffer. Given -// the headers are not proxied, the connection manager will send a local error reply. -TEST_P(IntegrationTest, HittingGrpcFilterLimitBufferingHeaders) { - config_helper_.prependFilter( - "{ name: grpc_http1_bridge, typed_config: { \"@type\": " - "type.googleapis.com/envoy.extensions.filters.http.grpc_http1_bridge.v3.Config } }"); - config_helper_.setBufferLimits(1024, 1024); - initialize(); - codec_client_ = makeHttpConnection(lookupPort("http")); - - auto response = codec_client_->makeHeaderOnlyRequest( - Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}, - {"content-type", "application/grpc"}, - {"x-envoy-retry-grpc-on", "cancelled"}}); - waitForNextUpstreamRequest(); - - // Send the overly large response. Because the grpc_http1_bridge filter buffers and buffer - // limits are exceeded, this will be translated into an unknown gRPC error. - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); - upstream_request_->encodeData(1024 * 65, false); - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - - ASSERT_TRUE(response->waitForEndStream()); - EXPECT_TRUE(response->complete()); - EXPECT_THAT(response->headers(), HttpStatusIs("200")); - EXPECT_THAT(response->headers(), - HeaderValueOf(Headers::get().GrpcStatus, "2")); // Unknown gRPC error -} - TEST_P(IntegrationTest, TestSmuggling) { initialize(); @@ -1309,17 +1275,8 @@ TEST_P(IntegrationTest, Connect) { EXPECT_EQ(normalizeDate(response1), normalizeDate(response2)); } -// Test that Envoy by default returns HTTP code 502 on upstream protocol error. 
-TEST_P(IntegrationTest, UpstreamProtocolErrorDefault) { - testRouterUpstreamProtocolError("502", "UPE"); -} - -// Test runtime overwrite to return 503 on upstream protocol error. -TEST_P(IntegrationTest, UpstreamProtocolErrorRuntimeOverwrite) { - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.return_502_for_upstream_protocol_errors", "false"); - testRouterUpstreamProtocolError("503", "UC"); -} +// Test that Envoy returns HTTP code 502 on upstream protocol error. +TEST_P(IntegrationTest, UpstreamProtocolError) { testRouterUpstreamProtocolError("502", "UPE"); } TEST_P(IntegrationTest, TestHead) { initialize(); @@ -2097,6 +2054,98 @@ TEST_P(IntegrationTest, RandomPreconnect) { } } +class TestRetryOptionsPredicateFactory : public Upstream::RetryOptionsPredicateFactory { +public: + Upstream::RetryOptionsPredicateConstSharedPtr + createOptionsPredicate(const Protobuf::Message&, + Upstream::RetryExtensionFactoryContext&) override { + return std::make_shared(); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom empty config proto. This is only allowed in tests. + return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + + std::string name() const override { return "test_retry_options_predicate_factory"; } + +private: + struct TestPredicate : public Upstream::RetryOptionsPredicate { + UpdateOptionsReturn updateOptions(const UpdateOptionsParameters&) const override { + UpdateOptionsReturn ret; + Network::TcpKeepaliveConfig tcp_keepalive_config; + tcp_keepalive_config.keepalive_probes_ = 1; + tcp_keepalive_config.keepalive_time_ = 1; + tcp_keepalive_config.keepalive_interval_ = 1; + ret.new_upstream_socket_options_ = + Network::SocketOptionFactory::buildTcpKeepaliveOptions(tcp_keepalive_config); + return ret; + } + }; +}; + +// Verify that a test retry options predicate starts a new connection pool with a new connection. 
+TEST_P(IntegrationTest, RetryOptionsPredicate) { + TestRetryOptionsPredicateFactory factory; + Registry::InjectFactory registered(factory); + + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* route_config = hcm.mutable_route_config(); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + auto* route = virtual_host->mutable_routes(0)->mutable_route(); + auto* retry_policy = route->mutable_retry_policy(); + retry_policy->set_retry_on("5xx"); + auto* predicate = retry_policy->add_retry_options_predicates(); + predicate->set_name("test_retry_options_predicate_factory"); + predicate->mutable_typed_config()->set_type_url( + "type.googleapis.com/google.protobuf.Struct"); + }); + + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, + {":path", "/some/path"}, + {":scheme", "http"}, + {":authority", "cluster_0"}, + }; + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + AssertionResult result = + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); + RELEASE_ASSERT(result, result.message()); + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + // Force a retry and run the predicate + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); + + // Using a different socket option will cause a new connection pool to be used and a new + // connection. 
+ FakeHttpConnectionPtr new_upstream_connection; + FakeStreamPtr new_upstream_request; + result = fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, new_upstream_connection); + RELEASE_ASSERT(result, result.message()); + result = new_upstream_connection->waitForNewStream(*dispatcher_, new_upstream_request); + RELEASE_ASSERT(result, result.message()); + result = new_upstream_request->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + new_upstream_request->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + result = response->waitForEndStream(); + RELEASE_ASSERT(result, result.message()); + + result = new_upstream_connection->close(); + RELEASE_ASSERT(result, result.message()); + result = new_upstream_connection->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); +} + // Tests that a filter (set-route-filter) using the setRoute callback and DelegatingRoute mechanism // successfully overrides the cached route, and subsequently, the request's upstream cluster // selection. diff --git a/test/integration/leds_integration_test.cc b/test/integration/leds_integration_test.cc new file mode 100644 index 0000000000000..c7ceae29e151e --- /dev/null +++ b/test/integration/leds_integration_test.cc @@ -0,0 +1,816 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/type/v3/http.pb.h" + +#include "test/common/grpc/grpc_client_integration.h" +#include "test/config/utility.h" +#include "test/integration/http_integration.h" +#include "test/test_common/network_utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +// Integration test for LEDS features. CDS is consumed vi filesystem subscription, +// and EDS and LEDS are consumed via using the delta-xDS gRPC protocol. 
+class LedsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, + public HttpIntegrationTest { +protected: + struct FakeUpstreamInfo { + FakeHttpConnectionPtr connection_; + FakeUpstream* upstream_{}; + absl::flat_hash_map stream_by_resource_name_; + + static constexpr char default_stream_name[] = "default"; + + // Used for cases where only a single stream is needed. + FakeStreamPtr& defaultStream() { return stream_by_resource_name_[default_stream_name]; } + }; + + LedsIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()), + codec_client_type_(envoy::type::v3::HTTP1) { + use_lds_ = false; + create_xds_upstream_ = false; + // LEDS is only supported by delta-xDS. + sotw_or_delta_ = Grpc::SotwOrDelta::Delta; + } + + ~LedsIntegrationTest() override { + // First disconnect upstream connections to avoid FIN messages causing unexpected + // disconnects on the fake servers. + for (auto& host_upstream_info : hosts_upstreams_info_) { + resetFakeUpstreamInfo(host_upstream_info); + } + + resetFakeUpstreamInfo(leds_upstream_info_); + resetFakeUpstreamInfo(eds_upstream_info_); + } + + // A helper function to set the endpoints health status. + void setEndpointsHealthStatus( + const absl::flat_hash_set& endpoints_idxs, + envoy::config::core::v3::HealthStatus health_status, absl::string_view collection_prefix, + absl::flat_hash_map& + updated_endpoints) { + for (const auto endpoint_idx : endpoints_idxs) { + const std::string endpoint_name = absl::StrCat(collection_prefix, "endpoint", endpoint_idx); + envoy::config::endpoint::v3::LbEndpoint endpoint; + // Shift fake_upstreams_ by 2 (due to EDS and LEDS fake upstreams). + setUpstreamAddress(endpoint_idx + 2, endpoint); + endpoint.set_health_status(health_status); + updated_endpoints.emplace(endpoint_name, endpoint); + } + } + + // Sets endpoints in a specific locality (using the LEDS helper). + // We need to supply the endpoints via LEDS to provide health status. 
Use a + // filesystem delivery to simplify test mechanics. + void setEndpoints(const absl::flat_hash_set& healthy_endpoints_idxs, + const absl::flat_hash_set& degraded_endpoints_idxs, + const absl::flat_hash_set& unhealthy_endpoints_idxs, + const absl::flat_hash_set& unknown_endpoints_idxs, + const absl::flat_hash_set& removed_endpoints_idxs, + uint32_t locality_idx = 0, bool await_update = true) { + const auto& collection_prefix = localities_prefixes_[locality_idx]; + absl::flat_hash_map updated_endpoints; + std::vector removed_endpoints; + setEndpointsHealthStatus(healthy_endpoints_idxs, envoy::config::core::v3::HEALTHY, + collection_prefix, updated_endpoints); + setEndpointsHealthStatus(degraded_endpoints_idxs, envoy::config::core::v3::DEGRADED, + collection_prefix, updated_endpoints); + setEndpointsHealthStatus(unhealthy_endpoints_idxs, envoy::config::core::v3::UNHEALTHY, + collection_prefix, updated_endpoints); + setEndpointsHealthStatus(unknown_endpoints_idxs, envoy::config::core::v3::UNKNOWN, + collection_prefix, updated_endpoints); + + for (const auto removed_endpoint_idx : removed_endpoints_idxs) { + const std::string endpoint_name = + absl::StrCat(collection_prefix, "endpoint", removed_endpoint_idx); + removed_endpoints.emplace_back(endpoint_name); + } + + sendDeltaLedsResponse(updated_endpoints, removed_endpoints, "7", locality_idx); + + if (await_update) { + // Receive LEDS ack. + EXPECT_TRUE(compareDeltaDiscoveryRequest( + Config::TypeUrl::get().LbEndpoint, {}, {}, + leds_upstream_info_.stream_by_resource_name_[localities_prefixes_[locality_idx]])); + } + } + + // Sends an LEDS response to a specific locality, containing the updated + // endpoints map (resource name to endpoint data), and the list of resource + // names to remove. 
+ void sendDeltaLedsResponse( + const absl::flat_hash_map& + to_update_map, + const std::vector& to_delete_list, const std::string& version, + uint32_t locality_idx) { + auto& locality_stream = + leds_upstream_info_.stream_by_resource_name_[localities_prefixes_[locality_idx]]; + ASSERT(locality_stream != nullptr); + envoy::service::discovery::v3::DeltaDiscoveryResponse response; + response.set_system_version_info(version); + response.set_type_url(Config::TypeUrl::get().LbEndpoint); + + for (const auto& endpoint_name : to_delete_list) { + *response.add_removed_resources() = endpoint_name; + } + for (const auto& [resource_name, lb_endpoint] : to_update_map) { + auto* resource = response.add_resources(); + resource->set_name(resource_name); + resource->set_version(version); + resource->mutable_resource()->PackFrom(lb_endpoint); + } + locality_stream->sendGrpcMessage(response); + } + + void createUpstreams() override { + // Add the EDS upstream. + eds_upstream_info_.upstream_ = &addFakeUpstream(FakeHttpConnection::Type::HTTP2); + // Add the LEDS upstream. + leds_upstream_info_.upstream_ = &addFakeUpstream(FakeHttpConnection::Type::HTTP2); + + // Create backends and initialize their wrapper. + HttpIntegrationTest::createUpstreams(); + // Store all hosts upstreams info in a single place so it would be easily + // accessible. + ASSERT(fake_upstreams_.size() == fake_upstreams_count_ + 2); + hosts_upstreams_info_.reserve(fake_upstreams_count_); + // Skip the first 2 fake upstreams as they are reserved for EDS and LEDS. + for (size_t i = 2; i < fake_upstreams_.size(); ++i) { + FakeUpstreamInfo host_info; + host_info.upstream_ = &(*fake_upstreams_[i]); + hosts_upstreams_info_.emplace_back(std::move(host_info)); + } + } + + // Initialize a gRPC stream of an upstream server. 
+ void initializeStream(FakeUpstreamInfo& upstream_info, + const std::string& resource_name = FakeUpstreamInfo::default_stream_name) { + if (!upstream_info.connection_) { + auto result = + upstream_info.upstream_->waitForHttpConnection(*dispatcher_, upstream_info.connection_); + RELEASE_ASSERT(result, result.message()); + } + if (!upstream_info.stream_by_resource_name_.try_emplace(resource_name, nullptr).second) { + RELEASE_ASSERT(false, + fmt::format("stream with resource name '{}' already exists!", resource_name)); + } + + auto result = upstream_info.connection_->waitForNewStream( + *dispatcher_, upstream_info.stream_by_resource_name_[resource_name]); + RELEASE_ASSERT(result, result.message()); + upstream_info.stream_by_resource_name_[resource_name]->startGrpcStream(); + } + + // A specific function to initialize LEDS streams. This was introduced to + // handle the non-deterministic requests order when more than one locality is + // used. This method first establishes the gRPC stream, fetches the first + // request and reads its requested resource name, and then assigns the stream + // to the internal data-structure. + void initializeAllLedsStreams() { + // Create a set of localities that are expected. + absl::flat_hash_set expected_localities_prefixes(localities_prefixes_.begin(), + localities_prefixes_.end()); + + if (!leds_upstream_info_.connection_) { + auto result = leds_upstream_info_.upstream_->waitForHttpConnection( + *dispatcher_, leds_upstream_info_.connection_); + RELEASE_ASSERT(result, result.message()); + } + + // Wait for the exact number of streams. 
+ for (uint32_t i = 0; i < localities_prefixes_.size(); ++i) { + // Create the stream for the LEDS collection and fetch the name from the + // contents, then validate that this is an expected collection + FakeStreamPtr temp_stream; + envoy::service::discovery::v3::DeltaDiscoveryRequest request; + auto result = leds_upstream_info_.connection_->waitForNewStream(*dispatcher_, temp_stream); + RELEASE_ASSERT(result, result.message()); + temp_stream->startGrpcStream(); + RELEASE_ASSERT(temp_stream->waitForGrpcMessage(*dispatcher_, request), + "LEDS message did not arrive as expected"); + RELEASE_ASSERT(request.resource_names_subscribe().size() == 1, + "Each LEDS request in this test must have a single resource"); + // Remove the "*" from the collection name to match against the set + // contents. + const auto request_collection_name = *request.resource_names_subscribe().begin(); + const auto pos = request_collection_name.find_last_of('*'); + ASSERT(pos != std::string::npos); + const auto request_collection_prefix = request_collection_name.substr(0, pos); + auto set_it = expected_localities_prefixes.find(request_collection_prefix); + ASSERT(set_it != expected_localities_prefixes.end()); + // Associate the stream with the locality prefix. + leds_upstream_info_.stream_by_resource_name_[*set_it] = std::move(temp_stream); + // Remove the locality prefix from the expected set. + expected_localities_prefixes.erase(set_it); + } + } + + void initializeTest(bool http_active_hc, uint32_t localities_num = 1) { + // Set up a single upstream host for the LEDS cluster using HTTP2 (gRPC). + setUpstreamCount(4); + + config_helper_.addConfigModifier([this, http_active_hc]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Add a static EDS cluster. 
+ auto* eds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + eds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + eds_cluster->set_name("eds_cluster"); + eds_cluster->mutable_load_assignment()->set_cluster_name("eds_cluster"); + ConfigHelper::setHttp2(*eds_cluster); + + // Add a static LEDS cluster. + auto* leds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + leds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + leds_cluster->set_name("leds_cluster"); + leds_cluster->mutable_load_assignment()->set_cluster_name("leds_cluster"); + ConfigHelper::setHttp2(*leds_cluster); + + // Remove the static cluster (cluster_0) and set up CDS. + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_resource_api_version( + envoy::config::core::v3::ApiVersion::V3); + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); + bootstrap.mutable_static_resources()->mutable_clusters()->erase( + bootstrap.mutable_static_resources()->mutable_clusters()->begin()); + + // Set the default static cluster to use EDS. 
+ auto& cluster_0 = cluster_; + cluster_0.set_name("cluster_0"); + cluster_0.set_type(envoy::config::cluster::v3::Cluster::EDS); + cluster_0.mutable_connect_timeout()->CopyFrom(Protobuf::util::TimeUtil::SecondsToDuration(5)); + auto* eds_cluster_config = cluster_0.mutable_eds_cluster_config(); + eds_cluster_config->mutable_eds_config()->set_resource_api_version( + envoy::config::core::v3::ApiVersion::V3); + auto* api_config_source = + eds_cluster_config->mutable_eds_config()->mutable_api_config_source(); + api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::DELTA_GRPC); + api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* grpc_service = api_config_source->add_grpc_services(); + setGrpcService(*grpc_service, "eds_cluster", eds_upstream_info_.upstream_->localAddress()); + if (http_active_hc) { + auto* health_check = cluster_0.add_health_checks(); + health_check->mutable_timeout()->set_seconds(30); + // TODO(mattklein123): Consider using simulated time here. + health_check->mutable_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_no_traffic_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_unhealthy_threshold()->set_value(1); + health_check->mutable_healthy_threshold()->set_value(1); + health_check->mutable_http_health_check()->set_path("/healthcheck"); + health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type_); + } + // Set the cluster using CDS. + cds_helper_.setCds({cluster_}); + }); + + // Set validate_clusters to false to allow us to reference a CDS cluster. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); }); + + defer_listener_finalization_ = true; + initialize(); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Create the EDS connection and stream. + initializeStream(eds_upstream_info_); + // Add the assignment and localities. + cluster_load_assignment_.set_cluster_name("cluster_0"); + localities_prefixes_.reserve(localities_num); + for (uint32_t locality_idx = 0; locality_idx < localities_num; ++locality_idx) { + // Setup per locality LEDS config over gRPC. + auto* locality_lb_endpoints = cluster_load_assignment_.add_endpoints(); + locality_lb_endpoints->set_priority(locality_idx); + auto* leds_locality_config = locality_lb_endpoints->mutable_leds_cluster_locality_config(); + auto* leds_config = leds_locality_config->mutable_leds_config(); + + leds_config->set_resource_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* api_config_source = leds_config->mutable_api_config_source(); + api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::DELTA_GRPC); + api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* grpc_service = api_config_source->add_grpc_services(); + setGrpcService(*grpc_service, "leds_cluster", leds_upstream_info_.upstream_->localAddress()); + + const std::string locality_endpoints_prefix = fmt::format( + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/cluster0/locality{}/", locality_idx); + localities_prefixes_.push_back(locality_endpoints_prefix); + const std::string locality_endpoints_collection_name = + absl::StrCat(locality_endpoints_prefix, "*"); + leds_locality_config->set_leds_collection_name(locality_endpoints_collection_name); + } + + 
EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Do the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_'s localities + // (ClusterLoadAssignment). + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, + {"cluster_0"}, {}, + eds_upstream_info_.defaultStream())); + sendDeltaDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {cluster_load_assignment_}, {}, "2", + eds_upstream_info_.defaultStream()); + + // Receive EDS ack. + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, {}, {}, + eds_upstream_info_.defaultStream())); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Create the LEDS connection and stream(s). + // Wait for all the LEDS streams to be established. Note that if more + // than one locality has issued a LEDS request, the order of the requests + // can be non-deterministic (e.g., the request for "locality1" might be + // received before the request for "locality0"). Therefore we first wait + // for all the streams to be established, and only then verify that all the + // requests arrived as expected. 
+ initializeAllLedsStreams(); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + } + + void resetFakeUpstreamInfo(FakeUpstreamInfo& upstream_info) { + if (upstream_info.connection_ == nullptr || upstream_info.upstream_ == nullptr) { + upstream_info.upstream_ = nullptr; + return; + } + AssertionResult result = upstream_info.connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = upstream_info.connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + upstream_info.connection_.reset(); + upstream_info.upstream_ = nullptr; + } + + void waitForHealthCheck(uint32_t upstream_info_idx) { + auto& host_info = hosts_upstreams_info_[upstream_info_idx]; + if (host_info.connection_ == nullptr) { + ASSERT_TRUE(host_info.upstream_->waitForHttpConnection(*dispatcher_, host_info.connection_)); + } + ASSERT_TRUE(host_info.connection_->waitForNewStream(*dispatcher_, host_info.defaultStream())); + ASSERT_TRUE(host_info.defaultStream()->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(host_info.defaultStream()->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host_info.defaultStream()->headers().getMethodValue(), "GET"); + } + + envoy::type::v3::CodecClientType codec_client_type_{}; + CdsHelper cds_helper_; + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment_; + envoy::config::cluster::v3::Cluster cluster_; + std::vector localities_prefixes_; + std::vector hosts_upstreams_info_; + FakeUpstreamInfo eds_upstream_info_; + FakeUpstreamInfo leds_upstream_info_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, LedsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); + +// Validates basic LEDS request response behavior. 
+TEST_P(LedsIntegrationTest, BasicLeds) { + initializeTest(true); + + // Send an endpoint update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first health-check and verify the host is healthy. This should warm the initial + // cluster. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // The endpoint sent a valid health-check so the cluster should be active. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Validates adding endpoints using LEDS. +TEST_P(LedsIntegrationTest, LedsAdd) { + initializeTest(true); + + // Send an endpoint update with an unknown state using LEDS. 
+ setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first health-check and verify the host is healthy. This should warm the initial + // cluster. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // The cluster should have now a single healthy host. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Add 2 more endpoints using LEDS in unknown state. + setEndpoints({}, {}, {}, {1, 2}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + + // There should be additional 2 backends in the cluster, and only one healthy. + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Send health-check responses back from the new hosts. 
+ for (int i = 1; i < 3; ++i) { + waitForHealthCheck(i); + hosts_upstreams_info_[i].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + } + + // Verify that Envoy observes the healthy endpoints. + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 3); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify that updating the same endpoint doesn't change anything. +TEST_P(LedsIntegrationTest, LedsUpdateSameEndpoint) { + initializeTest(true); + + // Send an endpoint update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first health-check and verify the host is healthy. This should warm the initial + // cluster. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. 
+ test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // The cluster should have now a single healthy host. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // "Update" the endpoint by sending the same state. The endpoint should still + // be healthy, as the active health check cleared it. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + + // There should be additional 2 backends in the cluster, and only one healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Verify that Envoy observes the healthy endpoint. + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify endpoint removal using LEDS. +TEST_P(LedsIntegrationTest, EndpointRemoval) { + // Set health-checking to false, so Envoy will remove the endpoint, although it + // is still healthy. + initializeTest(false); + + // Send 2 endpoints update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0, 1}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(3, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. 
+ EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // Remove one of the endpoints. + setEndpoints({}, {}, {}, {}, {0}); + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify that a config removing an unknown endpoint is a no-op (similar to CDS). +TEST_P(LedsIntegrationTest, UnknownEndpointRemoval) { + initializeTest(true); + + // Send 2 endpoints update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first health-check and verify the host is healthy. This should warm the initial + // cluster. 
+ waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // The endpoints sent valid health-checks so the cluster should be active. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Remove one of the endpoints. + setEndpoints({}, {}, {}, {}, {2}); + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_0.leds.update_rejected")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Validates that endpoints can be added and then moved to other localities without causing crashes +// (Primarily as a regression test for https://github.com/envoyproxy/envoy/issues/8764). +TEST_P(LedsIntegrationTest, MoveEndpointsBetweenLocalities) { + // Create 2 localities in the cluster, no health-check as part of this test. + initializeTest(false, 2); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Send an endpoint update using LEDS for locality 0. 
+ setEndpoints({}, {}, {}, {0}, {}, 0); + + // The update only updates the first locality, but the cluster should still be + // in warmed state. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Send an endpoint update using LEDS for locality 1. + setEndpoints({}, {}, {}, {1, 2}, {}, 1); + + // All localities should have endpoints so the cluster warm-up should be over. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3); + + // There should be a single backend in the cluster, all healthy as there isn't + // active health-check. + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // Move one endpoint from locality1 to locality0. + setEndpoints({}, {}, {}, {0, 2}, {}, 0); + setEndpoints({}, {}, {}, {}, {2}, 1); + + // Wait for the additional 2 LEDS updates. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 4); + + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Move one endpoint from locality0 to locality1, and remove the other endpoint. + setEndpoints({}, {}, {}, {}, {2}, 0); + setEndpoints({}, {}, {}, {0}, {}, 1); + + // Wait for the additional 2 LEDS updates. 
+ test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 6); + + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify that an endpoint can be in 2 localities at the same time. +TEST_P(LedsIntegrationTest, LocalitiesShareEndpoint) { + // Create 2 localities in the cluster, no health-check as part of this test. + initializeTest(false, 2); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Send an endpoint update using LEDS for locality 0. + setEndpoints({}, {}, {}, {0}, {}, 0); + + // The update only updates the first locality, but the cluster should still be + // in warmed state. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Send an endpoint update using LEDS for locality 1 with a different endpoint. + setEndpoints({}, {}, {}, {1}, {}, 1); + + // All localities should have endpoints so the cluster warm-up should be over. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3); + + // There should be 2 hosts in the cluster, all healthy as there isn't active health-check. + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. 
+ test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // Send a LEDS update to locality 1 with the same endpoint that is in locality 0. + setEndpoints({}, {}, {}, {0}, {}, 1); + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 3); + + // There should be 2 hosts in the cluster, all healthy as there isn't active health-check. + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Remove the endpoint from one locality. + setEndpoints({}, {}, {}, {}, {0}, 0); + + // Wait for the additional LEDS update. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 4); + + // There are 2 endpoints left in locality 1. + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify that a host stabilized via active health checking which is first removed from LEDS and +// then fails health checking is removed. +TEST_P(LedsIntegrationTest, RemoveAfterHcFail) { + initializeTest(true); + setEndpoints({}, {}, {}, {0}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.leds.update_success")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first HC and verify the host is healthy. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Clear out the host and verify the host is still healthy. 
+ setEndpoints({}, {}, {}, {}, {0}); + + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.leds.update_success")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Fail HC and verify the host is gone. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "503"}, {"connection", "close"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 0); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); +} + +// Validate that health status updates are consumed from LEDS. +TEST_P(LedsIntegrationTest, HealthUpdate) { + initializeTest(false); + // Initial state, no cluster members. + EXPECT_EQ(0, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // 2 healthy endpoints. + setEndpoints({0, 1}, {}, {}, {}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Drop to 0/2 healthy endpoints (2 unknown health state). + setEndpoints({}, {}, {0, 1}, {}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Increase to 1/2 healthy endpoints (host 1 will remain unhealthy). 
+ setEndpoints({0}, {}, {1}, {}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Add host and modify healthy to 2/3 healthy endpoints. + setEndpoints({2}, {}, {1}, {}, {}); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Modify healthy to 2/3 healthy and 1/3 degraded. + setEndpoints({}, {1}, {}, {}, {}); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_degraded")->value()); +} + +// Validates that in a LEDS response that contains 2 endpoints with the same +// address, only the first will be used. +TEST_P(LedsIntegrationTest, LedsSameAddressEndpoints) { + initializeTest(false); + + // Send a response with 2 endpoints with a different resource name but that + // map to the same address. + const auto& collection_prefix = localities_prefixes_[0]; + absl::flat_hash_map updated_endpoints; + std::vector removed_endpoints; + + const std::vector endpoints_names{ + absl::StrCat(collection_prefix, "endpoint0"), + absl::StrCat(collection_prefix, "endpoint1"), + }; + + for (const auto& endpoint_name : endpoints_names) { + envoy::config::endpoint::v3::LbEndpoint endpoint; + // Shift fake_upstreams_ by 2 (due to EDS and LEDS fake upstreams). 
+ setUpstreamAddress(2, endpoint); + endpoint.set_health_status(envoy::config::core::v3::HEALTHY); + updated_endpoints.emplace(endpoint_name, endpoint); + } + + sendDeltaLedsResponse(updated_endpoints, removed_endpoints, "7", 0); + + // Await for update (LEDS Ack). + EXPECT_TRUE(compareDeltaDiscoveryRequest( + Config::TypeUrl::get().LbEndpoint, {}, {}, + leds_upstream_info_.stream_by_resource_name_[localities_prefixes_[0]])); + + // Verify that the update is successful. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // Verify that only one endpoint was processed. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index 47d7859bf5f03..06eacbaa07587 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -250,14 +250,14 @@ TEST_P(ListenerIntegrationTest, RejectsUnsupportedTypedPerFilterConfig) { route: cluster: cluster_0 typed_per_filter_config: - envoy.filters.http.health_check: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false + set-response-code: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + code: 403 http_filters: - - name: envoy.filters.http.health_check + - name: set-response-code typed_config: - "@type": 
type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + code: 402 - name: envoy.filters.http.router )EOF"); sendLdsResponse({listener}, "2"); @@ -554,6 +554,11 @@ TEST_P(ListenerIntegrationTest, ChangeListenerAddress) { EXPECT_EQ(request_size, upstream_request_->bodyLength()); } +struct PerConnection { + std::string response_; + std::unique_ptr client_conn_; + FakeRawConnectionPtr upstream_conn_; +}; class RebalancerTest : public testing::TestWithParam, public BaseIntegrationTest { public: @@ -585,10 +590,7 @@ class RebalancerTest : public testing::TestWithParamset_value(false); virtual_listener_config.set_name("balanced_target_listener"); virtual_listener_config.mutable_connection_balance_config()->mutable_exact_balance(); - - // TODO(lambdai): Replace by getLoopbackAddressUrlString to emulate the real world. - *virtual_listener_config.mutable_address()->mutable_socket_address()->mutable_address() = - "127.0.0.2"; + *virtual_listener_config.mutable_stat_prefix() = target_listener_prefix_; virtual_listener_config.mutable_address()->mutable_socket_address()->set_port_value(80); }); BaseIntegrationTest::initialize(); @@ -604,14 +606,66 @@ class RebalancerTest : public testing::TestWithParam client_conn_; - FakeRawConnectionPtr upstream_conn_; + void verifyBalance(uint32_t repeats = 10) { + // The balancer is balanced as per active connection instead of total connection. + // The below vector maintains all the connections alive. 
+ std::vector connections; + for (uint32_t i = 0; i < repeats * concurrency_; ++i) { + connections.emplace_back(); + connections.back().client_conn_ = + createConnectionAndWrite("dummy", connections.back().response_); + connections.back().client_conn_->waitForConnection(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(connections.back().upstream_conn_)); + } + for (auto& conn : connections) { + conn.client_conn_->close(); + while (!conn.client_conn_->closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + } + ASSERT_EQ(TestUtility::findCounter(test_server_->statStore(), + absl::StrCat("listener.", target_listener_prefix_, + ".worker_0.downstream_cx_total")) + ->value(), + repeats); + ASSERT_EQ(TestUtility::findCounter(test_server_->statStore(), + absl::StrCat("listener.", target_listener_prefix_, + ".worker_1.downstream_cx_total")) + ->value(), + repeats); + } + + // The stats prefix that shared by ipv6 and ipv4 listener. + std::string target_listener_prefix_{"balanced_listener"}; }; +TEST_P(RebalancerTest, BindToPortUpdate) { + concurrency_ = 2; + initialize(); + + ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + + new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + // This virtual listener need updating. + auto& virtual_listener_config = *bootstrap.mutable_static_resources()->mutable_listeners(1); + *virtual_listener_config.mutable_address()->mutable_socket_address()->mutable_address() = + bootstrap.static_resources().listeners(0).address().socket_address().address(); + (*(*virtual_listener_config.mutable_metadata()->mutable_filter_metadata())["random_filter_name"] + .mutable_fields())["random_key"] + .set_number_value(2); + }); + // Create an LDS response with the new config, and reload config. 
+ new_config_helper.setLds("1"); + + test_server_->waitForCounterEq("listener_manager.listener_modified", 1); + test_server_->waitForGaugeEq("listener_manager.total_listeners_draining", 0); + + verifyBalance(); +} + // Verify the connections are distributed evenly on the 2 worker threads of the redirected // listener. // Currently flaky because the virtual listener create listen socket anyway despite the socket is @@ -620,36 +674,8 @@ TEST_P(RebalancerTest, DISABLED_RedirectConnectionIsBalancedOnDestinationListene auto ip_address_str = Network::Test::getLoopbackAddressUrlString(TestEnvironment::getIpVersionsForTest().front()); concurrency_ = 2; - int repeats = 10; initialize(); - - // The balancer is balanced as per active connection instead of total connection. - // The below vector maintains all the connections alive. - std::vector connections; - for (uint32_t i = 0; i < repeats * concurrency_; ++i) { - connections.emplace_back(); - connections.back().client_conn_ = - createConnectionAndWrite("dummy", connections.back().response_); - connections.back().client_conn_->waitForConnection(); - ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(connections.back().upstream_conn_)); - } - for (auto& conn : connections) { - conn.client_conn_->close(); - while (!conn.client_conn_->closed()) { - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - } - } - - ASSERT_EQ(TestUtility::findCounter( - test_server_->statStore(), - absl::StrCat("listener.", ip_address_str, "_80.worker_0.downstream_cx_total")) - ->value(), - repeats); - ASSERT_EQ(TestUtility::findCounter( - test_server_->statStore(), - absl::StrCat("listener.", ip_address_str, "_80.worker_1.downstream_cx_total")) - ->value(), - repeats); + verifyBalance(); } INSTANTIATE_TEST_SUITE_P(IpVersions, RebalancerTest, diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 213622cc8cf54..44af79d2321ba 100644 --- 
a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -23,7 +23,7 @@ class LoadStatsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) { // We rely on some fairly specific load balancing picks in this test, so // determinize the schedule. - setDeterministic(); + setDeterministicValue(); } void addEndpoint(envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoints, diff --git a/test/integration/multiplexed_integration_test.cc b/test/integration/multiplexed_integration_test.cc index 0357402a6993a..4248c858838b9 100644 --- a/test/integration/multiplexed_integration_test.cc +++ b/test/integration/multiplexed_integration_test.cc @@ -29,7 +29,6 @@ using ::testing::MatchesRegex; namespace Envoy { -// TODO(#2557) fix all the failures. #define EXCLUDE_DOWNSTREAM_HTTP3 \ if (downstreamProtocol() == Http::CodecType::HTTP3) { \ return; \ @@ -906,8 +905,7 @@ TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } // Verify the case where there is an HTTP/2 codec/protocol error with an active stream. TEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) { - // TODO(#16757) Needs HTTP/3 "bad frame" equivalent. - EXCLUDE_DOWNSTREAM_HTTP3; + EXCLUDE_DOWNSTREAM_HTTP3; // The HTTP/3 client has no "bad frame" equivalent. initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -940,7 +938,7 @@ TEST_P(Http2IntegrationTest, Http2BadMagic) { } TEST_P(Http2IntegrationTest, BadFrame) { - EXCLUDE_DOWNSTREAM_HTTP3; // Needs HTTP/3 "bad frame" equivalent. + EXCLUDE_DOWNSTREAM_HTTP3; // The HTTP/3 client has no "bad frame" equivalent. initialize(); std::string response; @@ -956,8 +954,7 @@ TEST_P(Http2IntegrationTest, BadFrame) { // Send client headers, a GoAway and then a body and ensure the full request and // response are received. 
TEST_P(Http2IntegrationTest, GoAway) { - EXCLUDE_DOWNSTREAM_HTTP3; // QuicHttpClientConnectionImpl::goAway NOT_REACHED_GCOVR_EXCL_LINE - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); + autonomous_upstream_ = true; initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1723,16 +1720,14 @@ TEST_P(Http2FrameIntegrationTest, DownstreamSendingEmptyMetadata) { // This test uses an Http2Frame and not the encoder's encodeMetadata method, // because encodeMetadata fails when an empty metadata map is sent. beginSession(); - FakeHttpConnectionPtr fake_upstream_connection; - FakeStreamPtr fake_upstream_request; const uint32_t client_stream_idx = 1; // Send request. const Http2Frame request = Http2Frame::makePostRequest(client_stream_idx, "host", "/path/to/long/url"); sendFrame(request); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection)); - ASSERT_TRUE(fake_upstream_connection->waitForNewStream(*dispatcher_, fake_upstream_request)); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); // Send metadata frame with empty metadata map. const Http::MetadataMap empty_metadata_map; @@ -1746,9 +1741,9 @@ TEST_P(Http2FrameIntegrationTest, DownstreamSendingEmptyMetadata) { sendFrame(empty_data_frame); // Upstream sends a reply. - ASSERT_TRUE(fake_upstream_request->waitForEndStream(*dispatcher_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); const Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - fake_upstream_request->encodeHeaders(response_headers, true); + upstream_request_->encodeHeaders(response_headers, true); // Make sure that a response from upstream is received by the client, and // close the connection. 
@@ -1757,7 +1752,10 @@ TEST_P(Http2FrameIntegrationTest, DownstreamSendingEmptyMetadata) { EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus()); EXPECT_EQ(1, test_server_->counter("http2.metadata_empty_frames")->value()); - // Cleanup. + // Cleanup. Closing upstream connection first to avoid a race between the + // client FIN and the connection closure (see comment in + // HttpIntegrationTest::cleanupUpstreamAndDownstream). + cleanupUpstreamAndDownstream(); tcp_client_->close(); } diff --git a/test/integration/multiplexed_upstream_integration_test.cc b/test/integration/multiplexed_upstream_integration_test.cc index 6dedddbdea612..2a785467bfd07 100644 --- a/test/integration/multiplexed_upstream_integration_test.cc +++ b/test/integration/multiplexed_upstream_integration_test.cc @@ -193,65 +193,6 @@ TEST_P(Http2UpstreamIntegrationTest, BidirectionalStreamingReset) { EXPECT_EQ(1, downstreamTxResetCounterValue()); } -void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, - uint32_t request2_bytes, - uint32_t response1_bytes, - uint32_t response2_bytes) { - FakeStreamPtr upstream_request1; - FakeStreamPtr upstream_request2; - initialize(); - codec_client_ = makeHttpConnection(lookupPort("http")); - - // Start request 1 - auto encoder_decoder1 = - codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}); - Http::RequestEncoder* encoder1 = &encoder_decoder1.first; - auto response1 = std::move(encoder_decoder1.second); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request1)); - - // Start request 2 - auto encoder_decoder2 = - codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}); - 
Http::RequestEncoder* encoder2 = &encoder_decoder2.first; - auto response2 = std::move(encoder_decoder2.second); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2)); - - // Finish request 1 - codec_client_->sendData(*encoder1, request1_bytes, true); - ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_)); - - // Finish request 2 - codec_client_->sendData(*encoder2, request2_bytes, true); - ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_)); - - // Respond to request 2 - upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); - upstream_request2->encodeData(response2_bytes, true); - ASSERT_TRUE(response2->waitForEndStream()); - EXPECT_TRUE(upstream_request2->complete()); - EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); - EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().getStatusValue()); - EXPECT_EQ(response2_bytes, response2->body().size()); - - // Respond to request 1 - upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); - upstream_request1->encodeData(response1_bytes, true); - ASSERT_TRUE(response1->waitForEndStream()); - EXPECT_TRUE(upstream_request1->complete()); - EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); - EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().getStatusValue()); - EXPECT_EQ(response1_bytes, response1->body().size()); -} - TEST_P(Http2UpstreamIntegrationTest, SimultaneousRequest) { simultaneousRequest(1024, 512, 1023, 513); } @@ -261,17 +202,6 @@ TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimits) { simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); } -TEST_P(Http2UpstreamIntegrationTest, SimultaneousRequestAlpn) { - use_alpn_ = true; - simultaneousRequest(1024, 512, 1023, 513); -} - -TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimitsAlpn) { - use_alpn_ 
= true; - config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. - simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); -} - void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_bytes, uint32_t) { TestRandomGenerator rand; const uint32_t num_requests = 50; @@ -543,8 +473,6 @@ TEST_P(Http2UpstreamIntegrationTest, ConfigureHttpOverGrpcLogs) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - const std::string access_log_name = - TestEnvironment::temporaryPath(TestUtility::uniqueFilename()); // Configure just enough of an upstream access log to reference the upstream headers. const std::string yaml_string = R"EOF( name: router @@ -639,47 +567,4 @@ TEST_P(Http2UpstreamIntegrationTest, UpstreamGoaway) { cleanupUpstreamAndDownstream(); } -#ifdef ENVOY_ENABLE_QUIC - -class MixedUpstreamIntegrationTest : public Http2UpstreamIntegrationTest { -protected: - void initialize() override { - use_alpn_ = true; - Http2UpstreamIntegrationTest::initialize(); - } - void createUpstreams() override { - ASSERT_EQ(upstreamProtocol(), Http::CodecType::HTTP3); - ASSERT_EQ(fake_upstreams_count_, 1); - ASSERT_FALSE(autonomous_upstream_); - - if (use_http2_) { - auto config = configWithType(Http::CodecType::HTTP2); - Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); - addFakeUpstream(std::move(factory), Http::CodecType::HTTP2); - } else { - auto config = configWithType(Http::CodecType::HTTP3); - Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); - addFakeUpstream(std::move(factory), Http::CodecType::HTTP3); - } - } - - bool use_http2_{false}; -}; - -TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestAutoWithHttp3) { - testRouterRequestAndResponseWithBody(0, 0, false); -} - -TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestAutoWithHttp2) { - use_http2_ = 
true; - testRouterRequestAndResponseWithBody(0, 0, false); -} - -INSTANTIATE_TEST_SUITE_P(Protocols, MixedUpstreamIntegrationTest, - testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( - {Http::CodecType::HTTP2}, {Http::CodecType::HTTP3})), - HttpProtocolIntegrationTest::protocolTestParamsToString); - -#endif - } // namespace Envoy diff --git a/test/integration/multiplexed_upstream_integration_test.h b/test/integration/multiplexed_upstream_integration_test.h index a0903c717d2ec..0b2eecc318c44 100644 --- a/test/integration/multiplexed_upstream_integration_test.h +++ b/test/integration/multiplexed_upstream_integration_test.h @@ -14,8 +14,6 @@ class Http2UpstreamIntegrationTest : public HttpProtocolIntegrationTest { } void bidirectionalStreaming(uint32_t bytes); - void simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes, - uint32_t response1_bytes, uint32_t response2_bytes); void manySimultaneousRequests(uint32_t request_bytes, uint32_t response_bytes); bool use_alpn_{false}; diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index e6349bd3b8f88..4db44f4f03dc7 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -53,12 +53,6 @@ void setDoNotValidateRouteConfig( route_config->mutable_validate_clusters()->set_value(false); }; -// TODO(#2557) fix all the failures. -#define EXCLUDE_DOWNSTREAM_HTTP3 \ - if (downstreamProtocol() == Http::CodecType::HTTP3) { \ - return; \ - } - TEST_P(ProtocolIntegrationTest, TrailerSupportHttp1) { config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); @@ -170,48 +164,6 @@ TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { EXPECT_EQ("600", response->headers().getStatusValue()); } -// Add a health check filter and verify correct computation of health based on upstream status. 
-TEST_P(DownstreamProtocolIntegrationTest, ComputedHealthCheck) { - config_helper_.prependFilter(R"EOF( -name: health_check -typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false - cluster_min_healthy_percentages: - example_cluster_name: { value: 75 } -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ - {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); - ASSERT_TRUE(response->waitForEndStream()); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); -} - -// Add a health check filter and verify correct computation of health based on upstream status. -TEST_P(DownstreamProtocolIntegrationTest, ModifyBuffer) { - config_helper_.prependFilter(R"EOF( -name: health_check -typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false - cluster_min_healthy_percentages: - example_cluster_name: { value: 75 } -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ - {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); - ASSERT_TRUE(response->waitForEndStream()); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); -} - // Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248 TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { config_helper_.prependFilter(R"EOF( @@ -581,8 +533,6 @@ TEST_P(DownstreamProtocolIntegrationTest, DownstreamRequestWithFaultyFilter) { } TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { - // TODO(danzh) re-enable after adding http3 option "allow_connect". 
- EXCLUDE_DOWNSTREAM_HTTP3; if (upstreamProtocol() == Http::CodecType::HTTP3) { // For QUIC, even through the headers are not sent upstream, the stream will // be created. Use the autonomous upstream and allow incomplete streams. @@ -592,15 +542,10 @@ TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { // Faulty filter that removed host in a CONNECT request. config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { ConfigHelper::setConnectConfig(hcm, false, false); }); - config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - // Clone the whole listener. - auto static_resources = bootstrap.mutable_static_resources(); - auto* old_listener = static_resources->mutable_listeners(0); - auto* cloned_listener = static_resources->add_listeners(); - cloned_listener->CopyFrom(*old_listener); - old_listener->set_name("http_forward"); - }); + hcm) -> void { + ConfigHelper::setConnectConfig(hcm, false, false, + downstreamProtocol() == Http::CodecType::HTTP3); + }); useAccessLog("%RESPONSE_CODE_DETAILS%"); config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " "type.googleapis.com/google.protobuf.Empty } }"); @@ -611,9 +556,7 @@ TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { auto headers = Http::TestRequestHeaderMapImpl{ {":method", "CONNECT"}, {":scheme", "http"}, {":authority", "www.host.com:80"}}; - auto response = (downstream_protocol_ == Http::CodecType::HTTP1) - ? 
std::move((codec_client_->startRequest(headers)).second) - : codec_client_->makeHeaderOnlyRequest(headers); + auto response = std::move((codec_client_->startRequest(headers)).second); ASSERT_TRUE(response->waitForEndStream()); EXPECT_TRUE(response->complete()); @@ -2666,8 +2609,6 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { // Make sure that with override_stream_error_on_invalid_http_message true, CONNECT // results in stream teardown not connection teardown. TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { - // TODO(danzh) add "allow_connect" to http3 options. - EXCLUDE_DOWNSTREAM_HTTP3; if (downstreamProtocol() == Http::CodecType::HTTP1) { return; } @@ -2684,8 +2625,8 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ - {":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":authority", "host"}}); ASSERT_TRUE(response->waitForReset()); EXPECT_FALSE(codec_client_->disconnected()); diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index 5fe18fe47eadb..1fbd3ceb5a4c2 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -71,10 +71,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, : HttpIntegrationTest(Http::CodecType::HTTP3, GetParam(), ConfigHelper::quicHttpProxyConfig()), supported_versions_(quic::CurrentSupportedHttp3Versions()), conn_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) { - // Enable this flag for test coverage. 
- SetQuicReloadableFlag(quic_tls_set_signature_algorithm_prefs, true); - } + alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) {} ~QuicHttpIntegrationTest() override { cleanupUpstreamAndDownstream(); @@ -87,6 +84,12 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) override { // Setting socket options is not supported. ASSERT(!options); + return makeClientConnectionWithHost(port, ""); + } + + Network::ClientConnectionPtr makeClientConnectionWithHost(uint32_t port, + const std::string& host) { + // Setting socket options is not supported. server_addr_ = Network::Utility::resolveUrl( fmt::format("udp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); Network::Address::InstanceConstSharedPtr local_addr = @@ -103,8 +106,9 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, auto& persistent_info = static_cast(*quic_connection_persistent_info_); auto session = std::make_unique( persistent_info.quic_config_, supported_versions_, std::move(connection), - persistent_info.server_id_, persistent_info.cryptoConfig(), &push_promise_index_, - *dispatcher_, + (host.empty() ? persistent_info.server_id_ + : quic::QuicServerId{host, static_cast(port), false}), + persistent_info.cryptoConfig(), &push_promise_index_, *dispatcher_, // Use smaller window than the default one to have test coverage of client codec buffer // exceeding high watermark. 
/*send_buffer_limit=*/2 * Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE, @@ -119,8 +123,6 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options); if (!codec->disconnected()) { codec->setCodecClientCallbacks(client_codec_callback_); - EXPECT_EQ(transport_socket_factory_->clientContextConfig().serverNameIndication(), - codec->connection()->requestedServerName()); } return codec; } @@ -240,6 +242,8 @@ TEST_P(QuicHttpIntegrationTest, ZeroRtt) { initialize(); // Start the first connection. codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + EXPECT_EQ(transport_socket_factory_->clientContextConfig().serverNameIndication(), + codec_client_->connection()->requestedServerName()); // Send a complete request on the first connection. auto response1 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(0); @@ -317,6 +321,9 @@ TEST_P(QuicHttpIntegrationTest, PortMigration) { auto response = std::move(encoder_decoder.second); codec_client_->sendData(*request_encoder_, 1024u, false); + while (!quic_connection_->IsHandshakeConfirmed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } // Change to a new port by switching socket, and connection should still continue. 
Network::Address::InstanceConstSharedPtr local_addr = @@ -436,5 +443,187 @@ TEST_P(QuicHttpIntegrationTest, ResetRequestWithInvalidCharacter) { ASSERT_TRUE(response->waitForReset()); } +class QuicInplaceLdsIntegrationTest : public QuicHttpIntegrationTest { +public: + void inplaceInitialize(bool add_default_filter_chain = false) { + autonomous_upstream_ = true; + setUpstreamCount(2); + + config_helper_.addConfigModifier([add_default_filter_chain]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain_0 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + *filter_chain_0->mutable_filter_chain_match()->mutable_server_names()->Add() = "www.lyft.com"; + auto* filter_chain_1 = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_filter_chains() + ->Add(); + filter_chain_1->MergeFrom(*filter_chain_0); + + // filter chain 1 route to cluster_1 + *filter_chain_1->mutable_filter_chain_match()->mutable_server_names(0) = "lyft.com"; + + filter_chain_0->set_name("filter_chain_0"); + filter_chain_1->set_name("filter_chain_1"); + + auto* config_blob = filter_chain_1->mutable_filters(0)->mutable_typed_config(); + + ASSERT_TRUE(config_blob->Is()); + auto hcm_config = MessageUtil::anyConvert< + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( + *config_blob); + hcm_config.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster("cluster_1"); + config_blob->PackFrom(hcm_config); + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + + if (add_default_filter_chain) { + auto default_filter_chain = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_default_filter_chain(); + default_filter_chain->MergeFrom(*filter_chain_0); 
+ default_filter_chain->set_name("filter_chain_default"); + } + }); + + QuicHttpIntegrationTest::initialize(); + } + + void makeRequestAndWaitForResponse(IntegrationCodecClient& codec_client) { + IntegrationStreamDecoderPtr response = + codec_client.makeHeaderOnlyRequest(default_request_headers_); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_FALSE(codec_client.sawGoAway()); + } +}; + +INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicInplaceLdsIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(QuicInplaceLdsIntegrationTest, ReloadConfigUpdateNonDefaultFilterChain) { + inplaceInitialize(/*add_default_filter_chain=*/false); + + auto codec_client_0 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "www.lyft.com")); + auto codec_client_1 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + + // Remove filter_chain_1. 
+ ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + makeRequestAndWaitForResponse(*codec_client_0); + EXPECT_TRUE(codec_client_1->sawGoAway()); + codec_client_1->close(); + + auto codec_client_2 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "www.lyft.com")); + makeRequestAndWaitForResponse(*codec_client_2); + codec_client_2->close(); + + // Update filter chain again to add back filter_chain_1. + config_helper_.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 2); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 3); + + auto codec_client_3 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + makeRequestAndWaitForResponse(*codec_client_3); + makeRequestAndWaitForResponse(*codec_client_0); + codec_client_0->close(); + codec_client_3->close(); +} + +// Verify that the connection received GO_AWAY after its filter chain gets deleted during the +// listener update. +TEST_P(QuicInplaceLdsIntegrationTest, ReloadConfigUpdateDefaultFilterChain) { + inplaceInitialize(/*add_default_filter_chain=*/true); + + auto codec_client_0 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "www.lyft.com")); + + // Remove filter_chain_1. 
+ ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + // This connection should pick up the default filter chain. + auto codec_client_default = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + makeRequestAndWaitForResponse(*codec_client_default); + makeRequestAndWaitForResponse(*codec_client_0); + + // Modify the default filter chain. + ConfigHelper new_config_helper1( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(new_config_helper.bootstrap())); + new_config_helper1.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + auto default_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); + default_filter_chain->set_name("default_filter_chain_v3"); + }); + + new_config_helper1.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 2); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + + makeRequestAndWaitForResponse(*codec_client_0); + EXPECT_TRUE(codec_client_default->sawGoAway()); + codec_client_default->close(); + + // This connection should pick up the new default filter chain. 
+ auto codec_client_1 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + makeRequestAndWaitForResponse(*codec_client_1); + + // Remove the default filter chain. + ConfigHelper new_config_helper2( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(new_config_helper1.bootstrap())); + new_config_helper2.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->clear_default_filter_chain(); + }); + + new_config_helper2.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 3); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + + makeRequestAndWaitForResponse(*codec_client_0); + codec_client_0->close(); + EXPECT_TRUE(codec_client_1->sawGoAway()); + codec_client_1->close(); +} + } // namespace Quic } // namespace Envoy diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 6c9f983438f28..a88b83a1917b4 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -1,8 +1,5 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" -#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" -#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" -#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" #include "test/integration/http_protocol_integration.h" @@ -578,207 +575,6 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithResponseBody) { EXPECT_THAT(waitForAccessLog(access_log_name_, 1), HasSubstr("200 
via_upstream -\n")); } -TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) { - useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); - auto handle_prevent_repeated_target = - config_helper_.createVirtualHost("handle.internal.redirect.no.repeated.target"); - auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0) - ->mutable_route() - ->mutable_internal_redirect_policy(); - internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); - envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig - previous_routes_config; - auto* predicate = internal_redirect_policy->add_predicates(); - predicate->set_name("previous_routes"); - predicate->mutable_typed_config()->PackFrom(previous_routes_config); - config_helper_.addVirtualHost(handle_prevent_repeated_target); - - // Validate that header sanitization is only called once. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.set_via("via_value"); }); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - default_request_headers_.setHost("handle.internal.redirect.no.repeated.target"); - IntegrationStreamDecoderPtr response = - codec_client_->makeHeaderOnlyRequest(default_request_headers_); - - auto first_request = waitForNextStream(); - // Redirect to another route - redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); - first_request->encodeHeaders(redirect_response_, true); - - auto second_request = waitForNextStream(); - // Redirect back to the original route. - redirect_response_.setLocation("http://handle.internal.redirect.no.repeated.target/another/path"); - second_request->encodeHeaders(redirect_response_, true); - - auto third_request = waitForNextStream(); - // Redirect to the same route as the first redirect. This should fail. 
- redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/yet/another/path"); - third_request->encodeHeaders(redirect_response_, true); - - ASSERT_TRUE(response->waitForEndStream()); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().getStatusValue()); - EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", - response->headers().getLocationValue()); - EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") - ->value()); - EXPECT_EQ( - 1, - test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); - EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_, 0), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 1), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 2), - HasSubstr("302 via_upstream test-header-value\n")); - EXPECT_EQ("test-header-value", - response->headers().get(test_header_key_)[0]->value().getStringView()); -} - -TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) { - useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); - auto handle_allow_listed_redirect_route = - config_helper_.createVirtualHost("handle.internal.redirect.only.allow.listed.target"); - auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0) - ->mutable_route() - ->mutable_internal_redirect_policy(); - - auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates(); - allow_listed_routes_predicate->set_name("allow_listed_routes"); - envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig - allow_listed_routes_config; - *allow_listed_routes_config.add_allowed_route_names() = "max_three_hop"; - 
allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config); - - internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); - - config_helper_.addVirtualHost(handle_allow_listed_redirect_route); - - // Validate that header sanitization is only called once. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.set_via("via_value"); }); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - default_request_headers_.setHost("handle.internal.redirect.only.allow.listed.target"); - IntegrationStreamDecoderPtr response = - codec_client_->makeHeaderOnlyRequest(default_request_headers_); - - auto first_request = waitForNextStream(); - // Redirect to another route - redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); - first_request->encodeHeaders(redirect_response_, true); - - auto second_request = waitForNextStream(); - // Redirect back to the original route. - redirect_response_.setLocation( - "http://handle.internal.redirect.only.allow.listed.target/another/path"); - second_request->encodeHeaders(redirect_response_, true); - - auto third_request = waitForNextStream(); - // Redirect to the non-allow-listed route. This should fail. 
- redirect_response_.setLocation("http://handle.internal.redirect/yet/another/path"); - third_request->encodeHeaders(redirect_response_, true); - - ASSERT_TRUE(response->waitForEndStream()); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().getStatusValue()); - EXPECT_EQ("http://handle.internal.redirect/yet/another/path", - response->headers().getLocationValue()); - EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") - ->value()); - EXPECT_EQ( - 1, - test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); - EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_, 0), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 1), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 2), - HasSubstr("302 via_upstream test-header-value\n")); - EXPECT_EQ("test-header-value", - response->headers().get(test_header_key_)[0]->value().getStringView()); -} - -TEST_P(RedirectIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) { - useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); - auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost( - "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); - auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0) - ->mutable_route() - ->mutable_internal_redirect_policy(); - - internal_redirect_policy->set_allow_cross_scheme_redirect(true); - - auto* predicate = internal_redirect_policy->add_predicates(); - predicate->set_name("safe_cross_scheme_predicate"); - envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig - predicate_config; - predicate->mutable_typed_config()->PackFrom(predicate_config); - - 
internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); - - config_helper_.addVirtualHost(handle_safe_cross_scheme_route); - - // Validate that header sanitization is only called once. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.set_via("via_value"); }); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - default_request_headers_.setHost( - "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); - IntegrationStreamDecoderPtr response = - codec_client_->makeHeaderOnlyRequest(default_request_headers_); - - auto first_request = waitForNextStream(); - // Redirect to another route - redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); - first_request->encodeHeaders(redirect_response_, true); - - auto second_request = waitForNextStream(); - // Redirect back to the original route. - redirect_response_.setLocation( - "http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path"); - second_request->encodeHeaders(redirect_response_, true); - - auto third_request = waitForNextStream(); - // Redirect to https target. This should fail. 
- redirect_response_.setLocation("https://handle.internal.redirect/yet/another/path"); - third_request->encodeHeaders(redirect_response_, true); - - ASSERT_TRUE(response->waitForEndStream()); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().getStatusValue()); - EXPECT_EQ("https://handle.internal.redirect/yet/another/path", - response->headers().getLocationValue()); - EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") - ->value()); - EXPECT_EQ( - 1, - test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); - EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_, 0), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 1), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 2), - HasSubstr("302 via_upstream test-header-value\n")); - EXPECT_EQ("test-header-value", - response->headers().get(test_header_key_)[0]->value().getStringView()); -} - TEST_P(RedirectIntegrationTest, InvalidRedirect) { useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); initialize(); diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index 47d19d59fa545..ac712f7156021 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -67,7 +67,7 @@ class SdsStaticDownstreamIntegrationTest tls_certificate->mutable_private_key()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/serverkey.pem")); }); - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); HttpIntegrationTest::initialize(); registerTestServerPorts({"http"}); diff --git a/test/integration/server.cc b/test/integration/server.cc index 
451c951d20e3a..2bdeff2b63fd5 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -55,7 +55,7 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str IntegrationTestServerPtr IntegrationTestServer::create( const std::string& config_path, const Network::Address::IpVersion version, std::function server_ready_function, - std::function on_server_init_function, bool deterministic, + std::function on_server_init_function, absl::optional deterministic_value, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, @@ -65,7 +65,7 @@ IntegrationTestServerPtr IntegrationTestServer::create( if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } - server->start(version, on_server_init_function, deterministic, defer_listener_finalization, + server->start(version, on_server_init_function, deterministic_value, defer_listener_finalization, process_object, validation_config, concurrency, drain_time, drain_strategy, watermark_factory); return server; @@ -95,21 +95,19 @@ void IntegrationTestServer::unsetDynamicContextParam(absl::string_view resource_ }); } -void IntegrationTestServer::start(const Network::Address::IpVersion version, - std::function on_server_init_function, bool deterministic, - bool defer_listener_finalization, - ProcessObjectOptRef process_object, - Server::FieldValidationConfig validator_config, - uint32_t concurrency, std::chrono::seconds drain_time, - Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory) { +void IntegrationTestServer::start( + const Network::Address::IpVersion version, std::function on_server_init_function, + absl::optional deterministic_value, bool defer_listener_finalization, + ProcessObjectOptRef process_object, 
Server::FieldValidationConfig validator_config, + uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, + Buffer::WatermarkFactorySharedPtr watermark_factory) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); - thread_ = api_.threadFactory().createThread([version, deterministic, process_object, + thread_ = api_.threadFactory().createThread([version, deterministic_value, process_object, validator_config, concurrency, drain_time, drain_strategy, watermark_factory, this]() -> void { - threadRoutine(version, deterministic, process_object, validator_config, concurrency, drain_time, - drain_strategy, watermark_factory); + threadRoutine(version, deterministic_value, process_object, validator_config, concurrency, + drain_time, drain_strategy, watermark_factory); }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. @@ -183,7 +181,8 @@ void IntegrationTestServer::serverReady() { } void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version, - bool deterministic, ProcessObjectOptRef process_object, + absl::optional deterministic_value, + ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, @@ -193,11 +192,13 @@ void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion vers Thread::MutexBasicLockable lock; Random::RandomGeneratorPtr random_generator; - if (deterministic) { - random_generator = std::make_unique>(); + if (deterministic_value.has_value()) { + random_generator = std::make_unique>( + deterministic_value.value()); } else { random_generator = std::make_unique(); } + createAndRunEnvoyServer(options, time_system_, Network::Utility::getLocalAddress(version), *this, lock, *this, std::move(random_generator), process_object, watermark_factory); diff --git a/test/integration/server.h 
b/test/integration/server.h index 8ad4c633d6892..5144f2203646e 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -415,7 +415,7 @@ class IntegrationTestServer : public Logger::Loggable, static IntegrationTestServerPtr create( const std::string& config_path, const Network::Address::IpVersion version, std::function on_server_ready_function, - std::function on_server_init_function, bool deterministic, + std::function on_server_init_function, absl::optional deterministic_value, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization = false, ProcessObjectOptRef process_object = absl::nullopt, Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), @@ -445,10 +445,11 @@ class IntegrationTestServer : public Logger::Loggable, void onWorkersStarted() override {} void start(const Network::Address::IpVersion version, - std::function on_server_init_function, bool deterministic, - bool defer_listener_finalization, ProcessObjectOptRef process_object, - Server::FieldValidationConfig validation_config, uint32_t concurrency, - std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, + std::function on_server_init_function, + absl::optional deterministic_value, bool defer_listener_finalization, + ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy, Buffer::WatermarkFactorySharedPtr watermark_factory); void waitForCounterEq(const std::string& name, uint64_t value, @@ -553,7 +554,8 @@ class IntegrationTestServer : public Logger::Loggable, /** * Runs the real server on a thread. 
*/ - void threadRoutine(const Network::Address::IpVersion version, bool deterministic, + void threadRoutine(const Network::Address::IpVersion version, + absl::optional deterministic_value, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index ffe0dcbc742c3..14b2b8f61bf7e 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -270,6 +270,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // 2020/10/02 13251 39326 switch to google tcmalloc // 2021/08/15 17290 40349 add all host map to priority set for fast host // searching + // 2021/08/18 13176 40577 40700 Support slow start mode // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -290,7 +291,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // https://github.com/envoyproxy/envoy/issues/12209 // EXPECT_MEMORY_EQ(m_per_cluster, 37061); } - EXPECT_MEMORY_LE(m_per_cluster, 40350); // Round up to allow platform variations. + EXPECT_MEMORY_LE(m_per_cluster, 40700); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index d45f72d59910c..ea3d79afd5894 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -12,19 +12,16 @@ namespace Envoy { namespace { // Terminating CONNECT and sending raw TCP upstream. 
-class ConnectTerminationIntegrationTest - : public testing::TestWithParam, - public HttpIntegrationTest { +class ConnectTerminationIntegrationTest : public HttpProtocolIntegrationTest { public: - ConnectTerminationIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, GetParam()) { - enableHalfClose(true); - } + ConnectTerminationIntegrationTest() { enableHalfClose(true); } void initialize() override { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { - ConfigHelper::setConnectConfig(hcm, true, allow_post_); + ConfigHelper::setConnectConfig(hcm, true, allow_post_, + downstream_protocol_ == Http::CodecType::HTTP3); if (enable_timeout_) { hcm.mutable_stream_idle_timeout()->set_seconds(0); @@ -69,6 +66,30 @@ class ConnectTerminationIntegrationTest {":protocol", "bytestream"}, {":scheme", "https"}, {":authority", "host:80"}}; + void clearExtendedConnectHeaders() { + connect_headers_.removeProtocol(); + connect_headers_.removePath(); + } + + void sendBidirectionalDataAndCleanShutdown() { + sendBidirectionalData("hello", "hello", "there!", "there!"); + // Send a second set of data to make sure for example headers are only sent once. + sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); + + // Send an end stream. This should result in half close upstream. + codec_client_->sendData(*request_encoder_, "", true); + ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); + + // Now send a FIN from upstream. This should result in clean shutdown downstream. 
+ ASSERT_TRUE(fake_raw_upstream_connection_->close()); + if (downstream_protocol_ == Http::CodecType::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + ASSERT_TRUE(response_->waitForEndStream()); + ASSERT_FALSE(response_->reset()); + } + } + FakeRawConnectionPtr fake_raw_upstream_connection_; IntegrationStreamDecoderPtr response_; bool enable_timeout_{}; @@ -76,22 +97,19 @@ class ConnectTerminationIntegrationTest bool allow_post_{}; }; -TEST_P(ConnectTerminationIntegrationTest, Basic) { +TEST_P(ConnectTerminationIntegrationTest, OriginalStyle) { initialize(); + clearExtendedConnectHeaders(); setUpConnection(); - sendBidirectionalData("hello", "hello", "there!", "there!"); - // Send a second set of data to make sure for example headers are only sent once. - sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); + sendBidirectionalDataAndCleanShutdown(); +} - // Send an end stream. This should result in half close upstream. - codec_client_->sendData(*request_encoder_, "", true); - ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); +TEST_P(ConnectTerminationIntegrationTest, Basic) { + initialize(); - // Now send a FIN from upstream. This should result in clean shutdown downstream. - ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForEndStream()); - ASSERT_FALSE(response_->reset()); + setUpConnection(); + sendBidirectionalDataAndCleanShutdown(); } TEST_P(ConnectTerminationIntegrationTest, BasicAllowPost) { @@ -103,18 +121,7 @@ TEST_P(ConnectTerminationIntegrationTest, BasicAllowPost) { connect_headers_.removeProtocol(); setUpConnection(); - sendBidirectionalData("hello", "hello", "there!", "there!"); - // Send a second set of data to make sure for example headers are only sent once. - sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); - - // Send an end stream. This should result in half close upstream. 
- codec_client_->sendData(*request_encoder_, "", true); - ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); - - // Now send a FIN from upstream. This should result in clean shutdown downstream. - ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForEndStream()); - ASSERT_FALSE(response_->reset()); + sendBidirectionalDataAndCleanShutdown(); } TEST_P(ConnectTerminationIntegrationTest, UsingHostMatch) { @@ -122,20 +129,10 @@ TEST_P(ConnectTerminationIntegrationTest, UsingHostMatch) { initialize(); connect_headers_.removePath(); + connect_headers_.removeProtocol(); setUpConnection(); - sendBidirectionalData("hello", "hello", "there!", "there!"); - // Send a second set of data to make sure for example headers are only sent once. - sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); - - // Send an end stream. This should result in half close upstream. - codec_client_->sendData(*request_encoder_, "", true); - ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); - - // Now send a FIN from upstream. This should result in clean shutdown downstream. - ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForEndStream()); - ASSERT_FALSE(response_->reset()); + sendBidirectionalDataAndCleanShutdown(); } TEST_P(ConnectTerminationIntegrationTest, DownstreamClose) { @@ -150,6 +147,10 @@ TEST_P(ConnectTerminationIntegrationTest, DownstreamClose) { } TEST_P(ConnectTerminationIntegrationTest, DownstreamReset) { + if (downstream_protocol_ == Http::CodecType::HTTP1) { + // Resetting an individual stream requires HTTP/2 or later. + return; + } initialize(); setUpConnection(); @@ -168,7 +169,13 @@ TEST_P(ConnectTerminationIntegrationTest, UpstreamClose) { // Tear down by closing the upstream connection. 
ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForReset()); + if (downstream_protocol_ == Http::CodecType::HTTP3) { + // In HTTP/3 end stream will be sent when the upstream connection is closed, and + // STOP_SENDING frame sent instead of reset. + ASSERT_TRUE(response_->waitForEndStream()); + } else { + ASSERT_TRUE(response_->waitForReset()); + } } TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { @@ -183,6 +190,9 @@ TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { } TEST_P(ConnectTerminationIntegrationTest, BuggyHeaders) { + if (downstream_protocol_ == Http::CodecType::HTTP1) { + return; + } initialize(); // Sending a header-only request is probably buggy, but rather than having a @@ -239,7 +249,10 @@ class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { void initialize() override { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { ConfigHelper::setConnectConfig(hcm, false, false); }); + hcm) -> void { + ConfigHelper::setConnectConfig(hcm, false, false, + downstream_protocol_ == Http::CodecType::HTTP3); + }); HttpProtocolIntegrationTest::initialize(); } @@ -253,7 +266,10 @@ class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { }; INSTANTIATE_TEST_SUITE_P(Protocols, ProxyingConnectIntegrationTest, - testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, + Http::CodecType::HTTP3}, + {Http::CodecType::HTTP1})), HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(ProxyingConnectIntegrationTest, ProxyConnect) { @@ -437,29 +453,20 @@ TEST_P(ProxyingConnectIntegrationTest, ProxyConnectWithIP) { cleanupUpstreamAndDownstream(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectTerminationIntegrationTest, - 
testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(HttpAndIpVersions, ConnectTerminationIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, + Http::CodecType::HTTP3}, + {Http::CodecType::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); using Params = std::tuple; // Tunneling downstream TCP over an upstream HTTP CONNECT tunnel. -class TcpTunnelingIntegrationTest : public testing::TestWithParam, - public HttpIntegrationTest { +class TcpTunnelingIntegrationTest : public HttpProtocolIntegrationTest { public: - TcpTunnelingIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP2, std::get<0>(GetParam())) {} - - static std::string paramsToString(const testing::TestParamInfo& p) { - return fmt::format( - "{}_{}", std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", - std::get<1>(p.param) == Http::CodecType::HTTP1 ? 
"HTTP1Upstream" : "HTTP2Upstream"); - } - void SetUp() override { enableHalfClose(true); - setDownstreamProtocol(Http::CodecType::HTTP2); - setUpstreamProtocol(std::get<1>(GetParam())); config_helper_.addConfigModifier( [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { @@ -471,8 +478,7 @@ class TcpTunnelingIntegrationTest : public testing::TestWithParam, auto* listener = bootstrap.mutable_static_resources()->add_listeners(); listener->set_name("tcp_proxy"); auto* socket_address = listener->mutable_address()->mutable_socket_address(); - socket_address->set_address( - Network::Test::getLoopbackAddressString(std::get<0>(GetParam()))); + socket_address->set_address(Network::Test::getLoopbackAddressString(version_)); socket_address->set_port_value(0); auto* filter_chain = listener->add_filter_chains(); @@ -480,6 +486,7 @@ class TcpTunnelingIntegrationTest : public testing::TestWithParam, filter->mutable_typed_config()->PackFrom(proxy_config); filter->set_name("envoy.filters.network.tcp_proxy"); }); + HttpProtocolIntegrationTest::SetUp(); } }; @@ -735,6 +742,13 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { // Test that an upstream flush works correctly (all data is flushed) TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { + if (upstreamProtocol() == Http::CodecType::HTTP3) { + // The payload data depends on having TCP buffers upstream and downstream. + // For HTTP/3, upstream, the flow control window will back up sooner, Envoy + // flow control will kick in, and the large write of |data| will fail to + // complete. + return; + } // Use a very large size to make sure it is larger than the kernel socket read buffer. const uint32_t size = 50 * 1024 * 1024; config_helper_.setBufferLimits(size, size); @@ -772,8 +786,8 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { } } -// Test that h2 connection is reused. -TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { +// Test that h2/h3 connection is reused. 
+TEST_P(TcpTunnelingIntegrationTest, ConnectionReuse) { if (upstreamProtocol() == Http::CodecType::HTTP1) { return; } @@ -820,7 +834,7 @@ TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { // Test that with HTTP1 we have no connection reuse with downstream close. TEST_P(TcpTunnelingIntegrationTest, H1NoConnectionReuse) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -905,7 +919,7 @@ TEST_P(TcpTunnelingIntegrationTest, H1UpstreamCloseNoConnectionReuse) { } TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -935,7 +949,7 @@ TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { } TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -964,7 +978,7 @@ TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { } TEST_P(TcpTunnelingIntegrationTest, TransferEncodingHeaderIgnoredHttp1) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -1066,11 +1080,11 @@ TEST_P(TcpTunnelingIntegrationTest, UpstreamDisconnectBeforeResponseReceived) { tcp_client->close(); } -INSTANTIATE_TEST_SUITE_P( - IpAndHttpVersions, TcpTunnelingIntegrationTest, - ::testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - testing::Values(Http::CodecType::HTTP1, Http::CodecType::HTTP2)), - TcpTunnelingIntegrationTest::paramsToString); - +INSTANTIATE_TEST_SUITE_P(IpAndHttpVersions, TcpTunnelingIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1}, + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, + Http::CodecType::HTTP3})), + 
HttpProtocolIntegrationTest::protocolTestParamsToString); } // namespace } // namespace Envoy diff --git a/test/integration/weighted_cluster_integration_test.cc b/test/integration/weighted_cluster_integration_test.cc new file mode 100644 index 0000000000000..e43d784419b70 --- /dev/null +++ b/test/integration/weighted_cluster_integration_test.cc @@ -0,0 +1,165 @@ +#include +#include +#include + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include "test/integration/filters/repick_cluster_filter.h" +#include "test/integration/http_integration.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class WeightedClusterIntegrationTest : public testing::Test, public HttpIntegrationTest { +public: + WeightedClusterIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, Network::Address::IpVersion::v6) {} + + void createUpstreams() override { + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // Add two fake upstreams + for (int i = 0; i < 2; ++i) { + addFakeUpstream(FakeHttpConnection::Type::HTTP2); + } + } + + void initializeConfig(const std::vector& weights) { + // Set the cluster configuration for `cluster_1` + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* cluster = bootstrap.mutable_static_resources()->add_clusters(); + cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + cluster->set_name(std::string(Envoy::RepickClusterFilter::ClusterName)); + ConfigHelper::setHttp2(*cluster); + }); + + // Add the custom filter. + config_helper_.addFilter("name: repick-cluster-filter"); + + // Modify route with weighted cluster configuration. 
+ config_helper_.addConfigModifier( + [&weights]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* weighted_clusters = hcm.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->mutable_weighted_clusters(); + + // Add a cluster with `name` specified. + auto* cluster = weighted_clusters->add_clusters(); + cluster->set_name("cluster_0"); + cluster->mutable_weight()->set_value(weights[0]); + + // Add a cluster with `cluster_header` specified. + cluster = weighted_clusters->add_clusters(); + cluster->set_cluster_header(std::string(Envoy::RepickClusterFilter::ClusterHeaderName)); + cluster->mutable_weight()->set_value(weights[1]); + + weighted_clusters->mutable_total_weight()->set_value( + std::accumulate(weights.begin(), weights.end(), 0UL)); + }); + + HttpIntegrationTest::initialize(); + } + + const std::vector& getDefaultWeights() { return default_weights_; } + + void sendRequestAndValidateResponse(const std::vector& upstream_indices) { + // Create a client aimed at Envoy’s default HTTP port. + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + + // Create some request headers. + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; + + // Send the request headers from the client, wait until they are received + // upstream. When they are received, send the default response headers from + // upstream and wait until they are received at by client. + IntegrationStreamDecoderPtr response = sendRequestAndWaitForResponse( + request_headers, 0, default_response_headers_, 0, upstream_indices); + + // Verify the proxied request was received upstream, as expected. + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + // Verify the proxied response was received downstream, as expected. 
+ EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ(0U, response->body().size()); + + // Perform the clean-up. + cleanupUpstreamAndDownstream(); + } + +private: + std::vector default_weights_ = {20, 30}; +}; + +// Steer the traffic (i.e. send the request) to the weighted cluster with `name` specified. +TEST_F(WeightedClusterIntegrationTest, SteerTrafficToOneClusterWithName) { + setDeterministicValue(); + initializeConfig(getDefaultWeights()); + + // The expected destination cluster upstream is index 0 since the selected + // value is set to 0 indirectly via `setDeterministicValue()` above to set the weight to 0. + sendRequestAndValidateResponse({0}); + + // Check that the expected upstream cluster has incoming request. + EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_total")->value(), 1); +} + +// Steer the traffic (i.e. send the request) to the weighted cluster with `cluster_header` +// specified. +TEST_F(WeightedClusterIntegrationTest, SteerTrafficToOneClusterWithHeader) { + const std::vector& default_weights = getDefaultWeights(); + + // The index of the cluster with `cluster_header` specified is 1. + int cluster_header_index = 1; + // Set the deterministic value to the accumulation of the weights of all clusters with + // `name`, so we can route the traffic to the first cluster with `cluster_header` based on + // weighted cluster selection algorithm in `RouteEntryImplBase::pickWeightedCluster()`. + uint64_t deterministric_value = + std::accumulate(default_weights.begin(), default_weights.begin() + cluster_header_index, 0UL); + setDeterministicValue(deterministric_value); + + initializeConfig(default_weights); + + sendRequestAndValidateResponse({static_cast(cluster_header_index)}); + + // Check that the expected upstream cluster has incoming request. 
+ std::string target_name = + absl::StrFormat("cluster.cluster_%d.upstream_cx_total", cluster_header_index); + EXPECT_EQ(test_server_->counter(target_name)->value(), 1); +} + +// Steer the traffic (i.e. send the request) to the weighted clusters randomly based on weight. +TEST_F(WeightedClusterIntegrationTest, SplitTrafficRandomly) { + std::vector weights = {50, 50}; + int upstream_count = weights.size(); + initializeConfig(weights); + + std::vector upstream_indices(upstream_count); + std::iota(std::begin(upstream_indices), std::end(upstream_indices), 0); + int request_num = 20; + for (int i = 0; i < request_num; ++i) { + // The expected destination cluster upstream is randomly selected based on + // weight, so all the upstreams needs to be available for selection. + sendRequestAndValidateResponse(upstream_indices); + } + + std::string target_name; + // Check that all the upstream clusters have been routed to at least once. + for (int i = 0; i < upstream_count; ++i) { + target_name = absl::StrFormat("cluster.cluster_%d.upstream_cx_total", i); + EXPECT_GE(test_server_->counter(target_name)->value(), 1); + } +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 7a11e6a156470..f0a0dec1f52e4 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -597,6 +597,84 @@ TEST_P(LdsIntegrationTest, NewListenerWithBadPostListenSocketOption) { test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); } +// Verify the grpc cached logger is available after the initial logger filter is destroyed. +// Regression test for https://github.com/envoyproxy/envoy/issues/18066 +TEST_P(LdsIntegrationTest, GrpcLoggerSurvivesAfterReloadConfig) { + autonomous_upstream_ = true; + // The grpc access logger connection never closes. It's ok to see an incomplete logging stream. 
+ autonomous_allow_incomplete_streams_ = true; + + const std::string grpc_logger_string = R"EOF( + name: grpc_accesslog + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig + common_config: + log_name: bar + transport_api_version: V3 + grpc_service: + envoy_grpc: + cluster_name: cluster_0 + )EOF"; + + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->set_stat_prefix("listener_0"); + }); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { TestUtility::loadFromYaml(grpc_logger_string, *hcm.add_access_log()); }); + initialize(); + // Given we're using LDS in this test, initialize() will not complete until + // the initial LDS file has loaded. + EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); + + // HTTP 1.1 is allowed and the connection is kept open until the listener update. + std::string response; + auto connection = + createConnectionDriver(lookupPort("http"), "GET / HTTP/1.1\r\nHost: host\r\n\r\n", + [&response, &dispatcher = *dispatcher_]( + Network::ClientConnection&, const Buffer::Instance& data) -> void { + response.append(data.toString()); + if (response.find("\r\n\r\n") != std::string::npos) { + dispatcher.exit(); + } + }); + connection->run(); + EXPECT_TRUE(response.find("HTTP/1.1 200") == 0); + + test_server_->waitForCounterEq("access_logs.grpc_access_log.logs_written", 1); + + // Create a new config with HTTP/1.0 proxying. The goal is to trigger a listener update. 
+ ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + hcm.mutable_http_protocol_options()->set_accept_http_10(true); + hcm.mutable_http_protocol_options()->set_default_host_for_http_10("default.com"); + }); + + // Create an LDS response with the new config, and reload config. + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterEq("listener_manager.lds.update_success", 2); + + // Wait until the http 1.1 connection is destroyed due to the listener update. It indicates the + // listener starts draining. + test_server_->waitForGaugeEq("listener.listener_0.downstream_cx_active", 0); + // Wait until all the draining filter chain is gone. It indicates the old listener and filter + // chains are destroyed. + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + + // Verify that the new listener config is applied. + std::string response2; + sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.0\r\n\r\n", &response2, true); + EXPECT_THAT(response2, HasSubstr("HTTP/1.0 200 OK\r\n")); + + // Verify that the grpc access logger is available after the listener update. + test_server_->waitForCounterEq("access_logs.grpc_access_log.logs_written", 2); +} + // Sample test making sure our config framework informs on listener failure. 
TEST_P(LdsIntegrationTest, FailConfigLoad) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index ef6f02c999cbd..d2d54c13ff3f7 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -50,6 +50,7 @@ class MockApi : public Api { MOCK_METHOD(Random::RandomGenerator&, randomGenerator, ()); MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, bootstrap, (), (const)); MOCK_METHOD(ProcessContextOptRef, processContext, ()); + MOCK_METHOD(Stats::CustomStatNamespaces&, customStatNamespaces, ()); testing::NiceMock file_system_; Event::GlobalTimeSystem time_system_; diff --git a/test/mocks/common.cc b/test/mocks/common.cc index fe64936013253..3f6c122d3671f 100644 --- a/test/mocks/common.cc +++ b/test/mocks/common.cc @@ -14,6 +14,11 @@ namespace Random { MockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); } +MockRandomGenerator::MockRandomGenerator(uint64_t value) : value_(value) { + ON_CALL(*this, random()).WillByDefault(Return(value_)); + ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); +} + MockRandomGenerator::~MockRandomGenerator() = default; } // namespace Random diff --git a/test/mocks/common.h b/test/mocks/common.h index b887865fe9bf5..1fec75bb256cb 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -110,11 +110,13 @@ namespace Random { class MockRandomGenerator : public RandomGenerator { public: MockRandomGenerator(); + MockRandomGenerator(uint64_t value); ~MockRandomGenerator() override; MOCK_METHOD(uint64_t, random, ()); MOCK_METHOD(std::string, uuid, ()); + uint64_t value_; const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; }; diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index f70fc602d5480..9a8f04cc23d1c 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -19,6 +19,7 @@ namespace Event { 
MockDispatcher::MockDispatcher() : MockDispatcher("test_thread") {} MockDispatcher::MockDispatcher(const std::string& name) : name_(name) { + time_system_ = std::make_unique(); ON_CALL(*this, initializeStats(_, _)).WillByDefault(Return()); ON_CALL(*this, clearDeferredDeleteList()).WillByDefault(Invoke([this]() -> void { to_delete_.clear(); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 26b1559e5ff95..fe7bb325436a2 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -37,7 +37,10 @@ class MockDispatcher : public Dispatcher { // Dispatcher const std::string& name() override { return name_; } - TimeSource& timeSource() override { return time_system_; } + TimeSource& timeSource() override { return *time_system_; } + GlobalTimeSystem& globalTimeSystem() { + return *(dynamic_cast(time_system_.get())); + } Network::ServerConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, Network::TransportSocketPtr&& transport_socket, @@ -162,7 +165,7 @@ class MockDispatcher : public Dispatcher { MOCK_METHOD(void, updateApproximateMonotonicTime, ()); MOCK_METHOD(void, shutdown, ()); - GlobalTimeSystem time_system_; + std::unique_ptr time_system_; std::list to_delete_; testing::NiceMock buffer_factory_; bool allow_null_callback_{}; diff --git a/test/mocks/grpc/mocks.cc b/test/mocks/grpc/mocks.cc index 20605edb277e8..6b747a66611d4 100644 --- a/test/mocks/grpc/mocks.cc +++ b/test/mocks/grpc/mocks.cc @@ -7,7 +7,13 @@ namespace Grpc { MockAsyncClient::MockAsyncClient() { async_request_ = std::make_unique>(); - ON_CALL(*this, sendRaw(_, _, _, _, _, _)).WillByDefault(Return(async_request_.get())); + ON_CALL(*this, sendRaw(_, _, _, _, _, _)) + .WillByDefault(Invoke([this](absl::string_view, absl::string_view, Buffer::InstancePtr&&, + RawAsyncRequestCallbacks&, Tracing::Span&, + const Http::AsyncClient::RequestOptions&) { + send_count_++; + return async_request_.get(); + })); } MockAsyncClient::~MockAsyncClient() = default; diff 
--git a/test/mocks/grpc/mocks.h b/test/mocks/grpc/mocks.h index 76de0db3f2b92..cf4f244b9e5c4 100644 --- a/test/mocks/grpc/mocks.h +++ b/test/mocks/grpc/mocks.h @@ -90,6 +90,8 @@ class MockAsyncClient : public RawAsyncClient { const Http::AsyncClient::StreamOptions& options)); std::unique_ptr> async_request_; + // Keep track of the number of requests to detect potential race condition. + int send_count_{}; }; class MockAsyncClientFactory : public AsyncClientFactory { diff --git a/test/mocks/http/alternate_protocols_cache.h b/test/mocks/http/alternate_protocols_cache.h index 2f1f287685730..eab3481208454 100644 --- a/test/mocks/http/alternate_protocols_cache.h +++ b/test/mocks/http/alternate_protocols_cache.h @@ -11,7 +11,7 @@ class MockAlternateProtocolsCache : public AlternateProtocolsCache { ~MockAlternateProtocolsCache() override; MOCK_METHOD(void, setAlternatives, - (const Origin& origin, const std::vector& protocols)); + (const Origin& origin, std::vector& protocols)); MOCK_METHOD(OptRef>, findAlternatives, (const Origin& origin)); MOCK_METHOD(size_t, size, (), (const)); @@ -22,7 +22,8 @@ class MockAlternateProtocolsCacheManager : public AlternateProtocolsCacheManager ~MockAlternateProtocolsCacheManager() override; MOCK_METHOD(AlternateProtocolsCacheSharedPtr, getCache, - (const envoy::config::core::v3::AlternateProtocolsCacheOptions& config)); + (const envoy::config::core::v3::AlternateProtocolsCacheOptions& config, + Event::Dispatcher& dispatcher)); }; class MockAlternateProtocolsCacheManagerFactory : public AlternateProtocolsCacheManagerFactory { diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 83cf041e07020..11db5041411af 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -516,6 +516,7 @@ class MockFilterChainFactoryCallbacks : public Http::FilterChainFactoryCallbacks (Http::StreamFilterSharedPtr filter, Matcher::MatchTreeSharedPtr match_tree)); MOCK_METHOD(void, addAccessLogHandler, (AccessLog::InstanceSharedPtr 
handler)); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); }; class MockDownstreamWatermarkCallbacks : public DownstreamWatermarkCallbacks { diff --git a/test/mocks/protobuf/mocks.h b/test/mocks/protobuf/mocks.h index 3e61b31fed120..7a54e2c18d616 100644 --- a/test/mocks/protobuf/mocks.h +++ b/test/mocks/protobuf/mocks.h @@ -14,9 +14,9 @@ class MockValidationVisitor : public ValidationVisitor { MOCK_METHOD(void, onUnknownField, (absl::string_view)); MOCK_METHOD(void, onDeprecatedField, (absl::string_view, bool)); + MOCK_METHOD(void, onWorkInProgress, (absl::string_view)); bool skipValidation() override { return skip_validation_; } - void setSkipValidation(bool s) { skip_validation_ = s; } private: diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 98c1c6710b201..e9ccd1de05a2e 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -104,10 +104,15 @@ class TestRetryPolicy : public RetryPolicy { // Router::RetryPolicy std::chrono::milliseconds perTryTimeout() const override { return per_try_timeout_; } + std::chrono::milliseconds perTryIdleTimeout() const override { return per_try_idle_timeout_; } uint32_t numRetries() const override { return num_retries_; } uint32_t retryOn() const override { return retry_on_; } MOCK_METHOD(std::vector, retryHostPredicates, (), (const)); MOCK_METHOD(Upstream::RetryPrioritySharedPtr, retryPriority, (), (const)); + absl::Span + retryOptionsPredicates() const override { + return retry_options_predicates_; + } uint32_t hostSelectionMaxAttempts() const override { return host_selection_max_attempts_; } const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; @@ -127,6 +132,7 @@ class TestRetryPolicy : public RetryPolicy { } std::chrono::milliseconds per_try_timeout_{0}; + std::chrono::milliseconds per_try_idle_timeout_{0}; uint32_t num_retries_{}; uint32_t retry_on_{}; uint32_t host_selection_max_attempts_; @@ -137,6 +143,7 @@ class TestRetryPolicy : public 
RetryPolicy { absl::optional max_interval_{}; std::vector reset_headers_{}; std::chrono::milliseconds reset_max_interval_{300000}; + std::vector retry_options_predicates_; }; class MockInternalRedirectPolicy : public InternalRedirectPolicy { diff --git a/test/mocks/router/router_filter_interface.h b/test/mocks/router/router_filter_interface.h index 55aa4d9a42210..d1349a4103ce1 100644 --- a/test/mocks/router/router_filter_interface.h +++ b/test/mocks/router/router_filter_interface.h @@ -31,6 +31,7 @@ class MockRouterFilterInterface : public RouterFilterInterface { UpstreamRequest& upstream_request)); MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); + MOCK_METHOD(void, onPerTryIdleTimeout, (UpstreamRequest & upstream_request)); MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ()); diff --git a/test/mocks/server/factory_context.cc b/test/mocks/server/factory_context.cc index c8a3e082414cf..40b9648dda9f7 100644 --- a/test/mocks/server/factory_context.cc +++ b/test/mocks/server/factory_context.cc @@ -20,7 +20,7 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, getTransportSocketFactoryContext()) .WillByDefault(ReturnRef(transport_socket_factory_context_)); @@ -29,6 +29,7 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); 
ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, serverScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); @@ -40,6 +41,7 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, messageValidationVisitor()) .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); } MockFactoryContext::~MockFactoryContext() = default; diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index ec7e6f8659ef0..c5b104a1f5d79 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -25,7 +25,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(bool, healthCheckFailed, ()); @@ -33,6 +33,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Stats::Scope&, serverScope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(OverloadManager&, overloadManager, ()); MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); @@ -64,6 +65,7 @@ class 
MockFactoryContext : public virtual FactoryContext { testing::NiceMock runtime_loader_; testing::NiceMock scope_; testing::NiceMock thread_local_; + testing::NiceMock options_; Singleton::ManagerPtr singleton_manager_; testing::NiceMock admin_; Stats::IsolatedStoreImpl listener_scope_; diff --git a/test/mocks/server/health_checker_factory_context.cc b/test/mocks/server/health_checker_factory_context.cc index f6a17d962e2ac..93d5611cdd0f7 100644 --- a/test/mocks/server/health_checker_factory_context.cc +++ b/test/mocks/server/health_checker_factory_context.cc @@ -14,7 +14,7 @@ using ::testing::ReturnRef; MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() { event_logger_ = new testing::NiceMock(); ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_)); ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_)); diff --git a/test/mocks/server/health_checker_factory_context.h b/test/mocks/server/health_checker_factory_context.h index 35b94285948fa..15ac903feff66 100644 --- a/test/mocks/server/health_checker_factory_context.h +++ b/test/mocks/server/health_checker_factory_context.h @@ -23,7 +23,7 @@ class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryConte ~MockHealthCheckerFactoryContext() override; MOCK_METHOD(Upstream::Cluster&, cluster, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc index f19f81dc4afcd..2351629bc4a86 100644 --- 
a/test/mocks/server/instance.cc +++ b/test/mocks/server/instance.cc @@ -61,11 +61,12 @@ MockServerFactoryContext::MockServerFactoryContext() : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), grpc_context_(scope_.symbolTable()), router_context_(scope_.symbolTable()) { ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, serverScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index 455e82eebfec5..de4f51099823c 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -149,12 +149,13 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { ~MockServerFactoryContext() override; MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Stats::Scope&, serverScope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); 
MOCK_METHOD(Server::Admin&, admin, ()); diff --git a/test/mocks/server/listener_factory_context.cc b/test/mocks/server/listener_factory_context.cc index bf2cc8992247c..a604a4bec5a68 100644 --- a/test/mocks/server/listener_factory_context.cc +++ b/test/mocks/server/listener_factory_context.cc @@ -20,7 +20,7 @@ MockListenerFactoryContext::MockListenerFactoryContext() ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); @@ -28,6 +28,7 @@ MockListenerFactoryContext::MockListenerFactoryContext() ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, serverScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); diff --git a/test/mocks/server/listener_factory_context.h b/test/mocks/server/listener_factory_context.h index 095aad5931dcb..0fbaddf2bd3a3 100644 --- a/test/mocks/server/listener_factory_context.h +++ b/test/mocks/server/listener_factory_context.h @@ -26,7 +26,7 @@ class MockListenerFactoryContext : public ListenerFactoryContext { MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); 
MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(bool, healthCheckFailed, ()); @@ -35,6 +35,7 @@ class MockListenerFactoryContext : public ListenerFactoryContext { MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Stats::Scope&, serverScope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(OverloadManager&, overloadManager, ()); MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); diff --git a/test/mocks/server/transport_socket_factory_context.h b/test/mocks/server/transport_socket_factory_context.h index ee98720c8d428..fe31909626259 100644 --- a/test/mocks/server/transport_socket_factory_context.h +++ b/test/mocks/server/transport_socket_factory_context.h @@ -27,7 +27,7 @@ class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { MOCK_METHOD(Stats::Scope&, scope, ()); MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Stats::Store&, stats, ()); diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index e7fcdef38adcf..e6c41713f5458 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -224,7 +224,9 @@ envoy_cc_mock( hdrs = ["cluster_manager_factory.h"], deps = [ "//envoy/upstream:cluster_manager_interface", + "//source/common/singleton:manager_impl_lib", 
"//test/mocks/secret:secret_mocks", + "//test/test_common:thread_factory_for_test_lib", ], ) diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index c24c81d95bc27..75b0f629e68f1 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -99,6 +99,7 @@ MockClusterInfo::MockClusterInfo() ON_CALL(*this, lbType()).WillByDefault(ReturnPointee(&lb_type_)); ON_CALL(*this, sourceAddress()).WillByDefault(ReturnRef(source_address_)); ON_CALL(*this, lbSubsetInfo()).WillByDefault(ReturnRef(lb_subset_)); + ON_CALL(*this, lbRoundRobinConfig()).WillByDefault(ReturnRef(lb_round_robin_config_)); ON_CALL(*this, lbRingHashConfig()).WillByDefault(ReturnRef(lb_ring_hash_config_)); ON_CALL(*this, lbMaglevConfig()).WillByDefault(ReturnRef(lb_maglev_config_)); ON_CALL(*this, lbOriginalDstConfig()).WillByDefault(ReturnRef(lb_original_dst_config_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 5e5415f88472e..05846734280be 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -117,6 +117,8 @@ class MockClusterInfo : public ClusterInfo { lbRingHashConfig, (), (const)); MOCK_METHOD(const absl::optional&, lbMaglevConfig, (), (const)); + MOCK_METHOD(const absl::optional&, + lbRoundRobinConfig, (), (const)); MOCK_METHOD(const absl::optional&, lbLeastRequestConfig, (), (const)); MOCK_METHOD(const absl::optional&, @@ -194,6 +196,7 @@ class MockClusterInfo : public ClusterInfo { upstream_http_protocol_options_; absl::optional alternate_protocols_cache_options_; + absl::optional lb_round_robin_config_; absl::optional lb_ring_hash_config_; absl::optional lb_maglev_config_; absl::optional lb_original_dst_config_; diff --git a/test/mocks/upstream/cluster_manager.h b/test/mocks/upstream/cluster_manager.h index f8b43ddb76557..08f4c1c563283 100644 --- a/test/mocks/upstream/cluster_manager.h +++ b/test/mocks/upstream/cluster_manager.h @@ -70,6 +70,7 @@ 
class MockClusterManager : public ClusterManager { } MOCK_METHOD(void, drainConnections, (const std::string& cluster)); MOCK_METHOD(void, drainConnections, ()); + MOCK_METHOD(void, checkActiveStaticCluster, (const std::string& cluster)); NiceMock thread_local_cluster_; envoy::config::core::v3::BindConfig bind_config_; diff --git a/test/mocks/upstream/cluster_manager_factory.h b/test/mocks/upstream/cluster_manager_factory.h index a9354c97998ed..b4328b31beb12 100644 --- a/test/mocks/upstream/cluster_manager_factory.h +++ b/test/mocks/upstream/cluster_manager_factory.h @@ -2,7 +2,10 @@ #include "envoy/upstream/cluster_manager.h" +#include "source/common/singleton/manager_impl.h" + #include "test/mocks/secret/mocks.h" +#include "test/test_common/thread_factory_for_test.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -16,6 +19,7 @@ class MockClusterManagerFactory : public ClusterManagerFactory { ~MockClusterManagerFactory() override; Secret::MockSecretManager& secretManager() override { return secret_manager_; }; + Singleton::Manager& singletonManager() override { return singleton_manager_; } MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto, (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); @@ -44,6 +48,7 @@ class MockClusterManagerFactory : public ClusterManagerFactory { private: NiceMock secret_manager_; + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; }; } // namespace Upstream } // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 49446ee5dc291..e4813c6917234 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -3,64 +3,85 @@ # directory:coverage_percent # for existing directories with low coverage. 
declare -a KNOWN_LOW_COVERAGE=( -"source/common:96.5" # Raise when QUIC coverage goes up -"source/common/api:75.3" -"source/common/api/posix:73.9" -"source/common/common/posix:94.1" +"source/common:96.0" # Raise when QUIC coverage goes up +"source/common/api:79.8" +"source/common/api/posix:78.5" +"source/common/common/posix:92.7" +"source/common/config:96.5" +"source/common/config/xds_mux:94.5" "source/common/crypto:0.0" -"source/common/event:94.2" # Emulated edge events guards don't report LCOV -"source/common/filesystem/posix:96.2" -"source/common/json:90.9" -"source/common/network:95.0" # Flaky, `activateFileEvents`, `startSecureTransport` and `ioctl` do not always report LCOV -"source/common/protobuf:94.7" -"source/common/signal:84.5" # Death tests don't report LCOV -"source/common/singleton:95.8" +"source/common/event:94.1" # Emulated edge events guards don't report LCOV +"source/common/filesystem/posix:95.5" +"source/common/http:96.3" +"source/common/http/http2:96.4" +"source/common/json:90.1" +"source/common/matcher:94.2" +"source/common/network:94.4" # Flaky, `activateFileEvents`, `startSecureTransport` and `ioctl`, listener_socket do not always report LCOV +"source/common/protobuf:95.3" +"source/common/quic:91.8" +"source/common/router:96.5" +"source/common/secret:94.9" +"source/common/signal:86.9" # Death tests don't report LCOV +"source/common/singleton:95.7" +"source/common/tcp:94.6" "source/common/thread:0.0" # Death tests don't report LCOV -"source/common/matcher:95.0" -"source/common/quic:91.2" "source/common/tracing:96.1" -"source/common/watchdog:42.9" # Death tests don't report LCOV -"source/common/config/xds_mux:94.5" -"source/exe:94.3" -"source/extensions/common/crypto:91.5" -"source/extensions/common/tap:95.9" +"source/common/upstream:96.2" +"source/common/watchdog:58.6" # Death tests don't report LCOV +"source/exe:92.6" +"source/extensions/common:95.9" +"source/extensions/common/tap:94.2" "source/extensions/common/wasm:95.3" # flaky: be careful 
adjusting -"source/extensions/common/wasm/null:77.8" -"source/extensions/common/wasm/v8:85.4" -"source/extensions/filters/common/expr:96.4" -"source/extensions/filters/common/fault:94.6" -"source/extensions/filters/common/rbac:88.6" -"source/extensions/filters/http/cache:92.6" -"source/extensions/filters/http/cache/simple_http_cache:95.6" -"source/extensions/filters/http/grpc_json_transcoder:95.6" -"source/extensions/filters/http/ip_tagging:91.2" -"source/extensions/filters/http/kill_request:85.0" # Death tests don't report LCOV -"source/extensions/filters/listener/tls_inspector:92.4" -"source/extensions/filters/network/common:96.2" -"source/extensions/filters/network/common/redis:96.3" -"source/extensions/filters/network/dubbo_proxy:96.2" -"source/extensions/filters/network/mongo_proxy:94.0" -"source/extensions/filters/network/sni_cluster:90.3" -"source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" -"source/extensions/health_checkers:95.9" -"source/extensions/health_checkers/redis:95.9" -"source/extensions/quic_listeners:85.1" -"source/extensions/stat_sinks/graphite_statsd:85.7" -"source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers/opencensus:94.2" -"source/extensions/tracers/xray:96.4" -"source/extensions/transport_sockets:95.8" -"source/extensions/transport_sockets/tls/cert_validator:96.5" -"source/extensions/transport_sockets/tls/private_key:76.9" -"source/extensions/transport_sockets/tls:95.2" +"source/extensions/common/wasm/ext:92.0" +"source/extensions/filters/common:96.1" +"source/extensions/filters/common/expr:96.2" +"source/extensions/filters/common/fault:94.5" +"source/extensions/filters/common/lua:96.5" +"source/extensions/filters/common/rbac:88.1" +"source/extensions/filters/http/cache:93.4" +"source/extensions/filters/http/cache/simple_http_cache:96.0" +"source/extensions/filters/http/grpc_json_transcoder:94.7" +"source/extensions/filters/http/ip_tagging:89.1" +"source/extensions/filters/http/kill_request:95.3" # Death 
tests don't report LCOV +"source/extensions/filters/http/lua:96.4" +"source/extensions/filters/http/wasm:95.8" +"source/extensions/filters/listener:96.2" +"source/extensions/filters/listener/http_inspector:95.9" +"source/extensions/filters/listener/original_dst:93.3" +"source/extensions/filters/listener/tls_inspector:93.5" +"source/extensions/filters/network/common:96.0" +"source/extensions/filters/network/common/redis:96.2" +"source/extensions/filters/network/mongo_proxy:95.5" +"source/extensions/filters/network/sni_cluster:88.9" +"source/extensions/filters/network/sni_dynamic_forward_proxy:95.2" +"source/extensions/filters/network/thrift_proxy/router:96.4" +"source/extensions/filters/network/wasm:95.7" +"source/extensions/filters/udp:96.4" +"source/extensions/filters/udp/dns_filter:96.2" +"source/extensions/health_checkers:95.7" +"source/extensions/health_checkers/redis:95.7" +"source/extensions/io_socket:96.2" +"source/extensions/io_socket/user_space:96.2" +"source/extensions/stat_sinks/common:96.4" +"source/extensions/stat_sinks/common/statsd:96.4" +"source/extensions/stat_sinks/graphite_statsd:88.5" +"source/extensions/stat_sinks/statsd:88.0" +"source/extensions/tracers/opencensus:94.8" +"source/extensions/tracers/xray:96.2" +"source/extensions/tracers/zipkin:96.1" +"source/extensions/transport_sockets:95.3" +"source/extensions/transport_sockets/tls:94.5" +"source/extensions/transport_sockets/tls/cert_validator:96.0" +"source/extensions/transport_sockets/tls/ocsp:96.5" +"source/extensions/transport_sockets/tls/private_key:77.8" "source/extensions/wasm_runtime/wamr:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wasmtime:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wavm:0.0" # Not enabled in coverage build -"source/extensions/watchdog:85.7" # Death tests within extensions -"source/extensions/watchdog/profile_action:85.7" -"source/server:94.4" # flaky: be careful adjusting. 
See https://github.com/envoyproxy/envoy/issues/15239 -"source/server/admin:95.8" -"source/server/config_validation:79.2" +"source/extensions/watchdog:83.3" # Death tests within extensions +"source/extensions/watchdog/profile_action:83.3" +"source/server:93.5" # flaky: be careful adjusting. See https://github.com/envoyproxy/envoy/issues/15239 +"source/server/admin:95.3" +"source/server/config_validation:76.7" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" diff --git a/test/proto/sensitive.proto b/test/proto/sensitive.proto index aefa73973f8e2..9d7726143e0b4 100644 --- a/test/proto/sensitive.proto +++ b/test/proto/sensitive.proto @@ -26,6 +26,8 @@ message Sensitive { udpa.type.v1.TypedStruct sensitive_typed_struct = 11 [(udpa.annotations.sensitive) = true]; repeated udpa.type.v1.TypedStruct sensitive_repeated_typed_struct = 12 [(udpa.annotations.sensitive) = true]; + map sensitive_string_map = 13 [(udpa.annotations.sensitive) = true]; + map sensitive_int_map = 14 [(udpa.annotations.sensitive) = true]; string insensitive_string = 101; repeated string insensitive_repeated_string = 102; @@ -39,4 +41,6 @@ message Sensitive { repeated google.protobuf.Any insensitive_repeated_any = 110; udpa.type.v1.TypedStruct insensitive_typed_struct = 111; repeated udpa.type.v1.TypedStruct insensitive_repeated_typed_struct = 112; + map insensitive_string_map = 113; + map insensitive_int_map = 114; } diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 7a315a1416359..2e7235ea4deb9 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -2,7 +2,7 @@ set -e -LLVM_VERSION="11.0.1" +LLVM_VERSION="12.0.1" CLANG_VERSION=$(clang --version | grep version | sed -e 's/\ *clang version \(.*\)\ */\1/') LLVM_COV_VERSION=$(llvm-cov --version | grep version | sed -e 's/\ *LLVM version \(.*\)/\1/') LLVM_PROFDATA_VERSION=$(llvm-profdata show --version | grep version | sed -e 's/\ *LLVM version \(.*\)/\1/') @@ -28,7 +28,7 @@ fi [[ -z "${SRCDIR}" 
]] && SRCDIR="${PWD}" [[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true [[ -z "${FUZZ_COVERAGE}" ]] && FUZZ_COVERAGE=false -[[ -z "${COVERAGE_THRESHOLD}" ]] && COVERAGE_THRESHOLD=96.5 +[[ -z "${COVERAGE_THRESHOLD}" ]] && COVERAGE_THRESHOLD=96.2 COVERAGE_TARGET="${COVERAGE_TARGET:-}" read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" @@ -92,7 +92,7 @@ fi if [[ "$VALIDATE_COVERAGE" == "true" ]]; then if [[ "${FUZZ_COVERAGE}" == "true" ]]; then - COVERAGE_THRESHOLD=27.0 + COVERAGE_THRESHOLD=24.0 fi COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if [[ "${COVERAGE_FAILED}" -eq 1 ]]; then diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index 7824218e28bb7..92c40e7d386ca 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -1,5 +1,6 @@ #include +#include "source/common/stats/custom_stat_namespaces_impl.h" #include "source/server/admin/prometheus_stats.h" #include "test/mocks/stats/mocks.h" @@ -100,40 +101,47 @@ class PrometheusStatsFormatterTest : public testing::Test { }; TEST_F(PrometheusStatsFormatterTest, MetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; std::string raw = "vulture.eats-liver"; std::string expected = "envoy_vulture_eats_liver"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(expected, actual.value()); } TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; std::string raw = "An.artist.plays-violin@019street"; std::string expected = "envoy_An_artist_plays_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_EQ(expected, 
actual.value()); } TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { + Stats::CustomStatNamespacesImpl custom_namespaces; std::string raw = "3.artists.play-violin@019street"; std::string expected = "envoy_3_artists_play_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(expected, actual.value()); } -TEST_F(PrometheusStatsFormatterTest, NamespaceRegistry) { - std::string raw = "vulture.eats-liver"; +TEST_F(PrometheusStatsFormatterTest, CustomNamespace) { + Stats::CustomStatNamespacesImpl custom_namespaces; + custom_namespaces.registerStatNamespace("promstattest"); + std::string raw = "promstattest.vulture.eats-liver"; std::string expected = "vulture_eats_liver"; + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(expected, actual.value()); +} - EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("3vulture")); - EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(".vulture")); - - EXPECT_FALSE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); - EXPECT_TRUE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); - EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); - EXPECT_EQ(expected, PrometheusStatsFormatter::metricName(raw)); - EXPECT_TRUE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); - - EXPECT_EQ("envoy_" + expected, PrometheusStatsFormatter::metricName(raw)); +TEST_F(PrometheusStatsFormatterTest, CustomNamespaceWithInvalidPromnamespace) { + Stats::CustomStatNamespacesImpl custom_namespaces; + custom_namespaces.registerStatNamespace("promstattest"); + std::string raw = "promstattest.1234abcd.eats-liver"; + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + 
EXPECT_FALSE(actual.has_value()); } TEST_F(PrometheusStatsFormatterTest, FormattedTags) { @@ -148,6 +156,7 @@ TEST_F(PrometheusStatsFormatterTest, FormattedTags) { } TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { + Stats::CustomStatNamespacesImpl custom_namespaces; // Create two counters and two gauges with each pair having the same name, // but having different tag names and values. @@ -163,12 +172,13 @@ TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(2UL, size); } TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; // Create two counters and two gauges, all with unique names. 
// statsAsPrometheus() should return four implying it found @@ -184,12 +194,13 @@ TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(4UL, size); } TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { + Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(std::vector(0)); Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); @@ -200,8 +211,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { addHistogram(histogram); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram @@ -234,6 +245,7 @@ envoy_histogram1_count{} 0 } TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { + Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(std::vector(0)); Stats::ConstSupportedBuckets buckets{10, 20}; @@ -245,8 +257,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { addHistogram(histogram); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( 
+ counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram @@ -262,6 +274,7 @@ envoy_histogram1_count{} 0 } TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { + Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. @@ -279,8 +292,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { addHistogram(histogram); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram @@ -313,14 +326,21 @@ envoy_histogram1_count{} 101100000 } TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { + Stats::CustomStatNamespacesImpl custom_namespaces; + custom_namespaces.registerStatNamespace("promtest"); + addCounter("cluster.test_1.upstream_cx_total", {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); addCounter("cluster.test_2.upstream_cx_total", {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addCounter("promtest.myapp.test.foo", {{makeStat("tag_name"), makeStat("tag-value")}}); addGauge("cluster.test_3.upstream_cx_total", {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); addGauge("cluster.test_4.upstream_cx_total", {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + addGauge("promtest.MYAPP.test.bar", {{makeStat("tag_name"), makeStat("tag-value")}}); + // Metric with invalid prometheus namespace in the custom metric must be excluded in the output. 
+ addGauge("promtest.1234abcd.test.bar", {{makeStat("tag_name"), makeStat("tag-value")}}); const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; HistogramWrapper h1_cumulative; @@ -335,9 +355,9 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(5UL, size); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); + EXPECT_EQ(7UL, size); const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 @@ -345,12 +365,18 @@ envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 # TYPE envoy_cluster_test_2_upstream_cx_total counter envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 +# TYPE myapp_test_foo counter +myapp_test_foo{tag_name="tag-value"} 0 + # TYPE envoy_cluster_test_3_upstream_cx_total gauge envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 # TYPE envoy_cluster_test_4_upstream_cx_total gauge envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 +# TYPE MYAPP_test_bar gauge +MYAPP_test_bar{tag_name="tag-value"} 0 + # TYPE envoy_cluster_test_1_upstream_rq_time histogram envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 @@ -385,6 +411,7 @@ envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 // should be sorted by their tags; the format specifies that it is preferred that metrics // are always grouped in the same order, and sorting is an easy 
way to ensure this. TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(h1_values); @@ -410,8 +437,8 @@ TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { } Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(6UL, size); const std::string expected_output = R"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter @@ -576,6 +603,7 @@ envoy_cluster_upstream_rq_time_count{cluster="ccc"} 7 } TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { + Stats::CustomStatNamespacesImpl custom_namespaces; addCounter("cluster.test_1.upstream_cx_total", {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); addCounter("cluster.test_2.upstream_cx_total", @@ -598,8 +626,8 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - true, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, true, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram @@ -632,6 +660,7 @@ envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 } TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { + Stats::CustomStatNamespacesImpl custom_namespaces; const std::vector h1_values = {}; 
HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(h1_values); @@ -649,8 +678,8 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, used_only, absl::nullopt, custom_namespaces); EXPECT_EQ(0UL, size); } @@ -659,13 +688,14 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, used_only, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); } } TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { + Stats::CustomStatNamespacesImpl custom_namespaces; addCounter("cluster.test_1.upstream_cx_total", {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); addCounter("cluster.test_2.upstream_cx_total", @@ -687,9 +717,10 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { addHistogram(histogram1); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus( + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( counters_, gauges_, histograms_, response, false, - absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); + absl::optional{std::regex("cluster.test_1.upstream_cx_total")}, + custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc 
index d0762c4425df7..5265850259702 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -32,7 +32,7 @@ TEST(ValidationClusterManagerTest, MockedMethods) { Event::SimulatedTimeSystem time_system; NiceMock validation_context; Api::ApiPtr api(Api::createApiForTest(stats_store, time_system)); - Server::MockOptions options; + NiceMock options; NiceMock runtime; NiceMock tls; NiceMock random; diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 442c38adc9999..5de495694bfc1 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -216,13 +216,13 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggablesocket_factory_, socketType()).WillOnce(Return(socket_type)); if (listener == nullptr) { // Expecting listener config in place update. // If so, dispatcher would not create new network listener. return listeners_.back().get(); } + EXPECT_CALL(listeners_.back()->socket_factory_, socketType()).WillOnce(Return(socket_type)); EXPECT_CALL(listeners_.back()->socket_factory_, getListenSocket(_)) .WillOnce(Return(listeners_.back()->socket_)); if (socket_type == Network::Socket::Type::Stream) { diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 53b2dcc962e6a..f3ed70f23015a 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -761,6 +761,8 @@ TEST_P(GuardDogActionsTest, MegaMissShouldSaturateOnMegaMissEvent) { EXPECT_THAT(events_, ElementsAre("MEGAMISS : 10", "MEGAMISS : 10")); } +// Disabled for coverage per #18229 +#if !defined(ENVOY_CONFIG_COVERAGE) TEST_P(GuardDogActionsTest, ShouldRespectEventPriority) { // Priority of events are KILL, MULTIKILL, MEGAMISS and MISS @@ -804,6 +806,7 @@ TEST_P(GuardDogActionsTest, ShouldRespectEventPriority) { guard_dog_->forceCheckForTest(); EXPECT_THAT(events_, ElementsAre("MEGAMISS : 
10", "MISS : 10")); } +#endif TEST_P(GuardDogActionsTest, KillShouldTriggerGuardDogActions) { auto die_function = [&]() -> void { @@ -817,6 +820,8 @@ TEST_P(GuardDogActionsTest, KillShouldTriggerGuardDogActions) { EXPECT_DEATH(die_function(), "ASSERT_GUARDDOG_ACTION"); } +// Disabled for coverage per #18229 +#if !defined(ENVOY_CONFIG_COVERAGE) TEST_P(GuardDogActionsTest, MultikillShouldTriggerGuardDogActions) { auto die_function = [&]() -> void { const NiceMock config(DISABLE_MISS, DISABLE_MEGAMISS, DISABLE_KILL, @@ -830,6 +835,7 @@ TEST_P(GuardDogActionsTest, MultikillShouldTriggerGuardDogActions) { EXPECT_DEATH(die_function(), "ASSERT_GUARDDOG_ACTION"); } +#endif } // namespace } // namespace Server diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 43f0e9b0269e7..8bf8e13880543 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -297,7 +297,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { port_value: 1234 } } - filter_chains: {} )EOF"; envoy::config::listener::v3::Listener listener_proto; EXPECT_TRUE(Protobuf::TextFormat::ParseFromString(proto_text, &listener_proto)); @@ -389,6 +388,22 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "foo: Cannot find field"); } + +TEST_F(ListenerManagerImplWithRealFiltersTest, BadConnectionLessUdpConfigWithFilterChain) { + const std::string yaml = R"EOF( +address: + socket_address: + protocol: UDP + address: 127.0.0.1 + port_value: 1234 +filter_chains: {} + )EOF"; + + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), + EnvoyException, + "1 filter chain\\(s\\) specified for connection-less UDP listener"); +} + class NonTerminalFilterFactory : public Configuration::NamedNetworkFilterConfigFactory { public: // 
Configuration::NamedNetworkFilterConfigFactory @@ -5102,9 +5117,9 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWo EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } -// This case also verifies that listeners that share port but do not share socket type (TCP vs. UDP) +// This case verifies that listeners that share port but do not share socket type (TCP vs. UDP) // do not share a listener. -TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) { +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfDifferentSocketType) { EXPECT_CALL(*worker_, start(_, _)); manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); @@ -5117,12 +5132,15 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAn auto new_listener_proto = listener_proto; new_listener_proto.mutable_address()->mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP); + EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0); + EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor()); + EXPECT_CALL(listener_factory_, createDrainManager_(_)); + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(new_listener_proto, "", true), + EnvoyException, + "error adding listener '127.0.0.1:1234': 1 filter chain(s) specified " + "for connection-less UDP listener."); - ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); - expectUpdateToThenDrain(new_listener_proto, listener_foo, OptRef(), - ListenerComponentFactory::BindType::ReusePort); - expectRemove(new_listener_proto, listener_foo_update1, *listener_factory_.socket_); - + expectRemove(new_listener_proto, listener_foo, *listener_factory_.socket_); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, 
server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } @@ -5204,8 +5222,6 @@ TEST_F(ListenerManagerImplTest, UdpDefaultWriterConfig) { address: 127.0.0.1 protocol: UDP port_value: 1234 -filter_chains: - filters: [] )EOF"); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 07c5659aece2a..916b68b19f377 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -292,8 +292,8 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { // 5. hot restart version - print the hot restart version and exit. const uint32_t options_not_in_proto = 5; - // There are two deprecated options: "max_stats" and "max_obj_name_len". - const uint32_t deprecated_options = 2; + // There are no deprecated options currently, add here as needed. + const uint32_t deprecated_options = 0; EXPECT_EQ(options->count() - options_not_in_proto, command_line_options->GetDescriptor()->field_count() - deprecated_options); @@ -604,14 +604,14 @@ TEST(DisableExtensions, DEPRECATED_FEATURE_TEST(IsDisabled)) { OptionsImpl::disableExtensions({"no/such.factory"})); EXPECT_NE(Registry::FactoryRegistry::getFactory("test"), nullptr); - EXPECT_NE(Registry::FactoryRegistry::getFactory("test-1"), nullptr); - EXPECT_NE(Registry::FactoryRegistry::getFactory("test-2"), nullptr); + EXPECT_EQ(Registry::FactoryRegistry::getFactory("test-1"), nullptr); + EXPECT_EQ(Registry::FactoryRegistry::getFactory("test-2"), nullptr); EXPECT_NE(Registry::FactoryRegistry::getFactoryByType("google.protobuf.StringValue"), nullptr); EXPECT_NE(Registry::FactoryRegistry::getFactory("test"), nullptr); - EXPECT_NE(Registry::FactoryRegistry::getFactory("test-1"), nullptr); - EXPECT_NE(Registry::FactoryRegistry::getFactory("test-2"), nullptr); + EXPECT_EQ(Registry::FactoryRegistry::getFactory("test-1"), nullptr); + 
EXPECT_EQ(Registry::FactoryRegistry::getFactory("test-2"), nullptr); OptionsImpl::disableExtensions({"test/test", "testing/test-2"}); diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index 60b2214487c8e..0886e0181d5ff 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -99,7 +99,7 @@ class FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitor Server::ResourceMonitorPtr createResourceMonitor(const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) override { - auto monitor = std::make_unique(context.dispatcher()); + auto monitor = std::make_unique(context.mainThreadDispatcher()); monitor_ = monitor.get(); return monitor; } diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index 7795619951f21..11127df57c2d8 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -46,9 +46,6 @@ makeHermeticPathsAndPorts(Fuzz::PerTestEnvironment& test_env, // The header_prefix is a write-once then read-only singleton that persists across tests. We clear // this field so that fuzz tests don't fail over multiple iterations. 
output.clear_header_prefix(); - if (output.has_hidden_envoy_deprecated_runtime()) { - output.mutable_hidden_envoy_deprecated_runtime()->set_symlink_root(test_env.temporaryPath("")); - } for (auto& listener : *output.mutable_static_resources()->mutable_listeners()) { if (listener.has_address()) { makePortHermetic(test_env, *listener.mutable_address()); diff --git a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml index d249a8dbb745f..e79bd34b52e7b 100644 --- a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml +++ b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml @@ -10,8 +10,3 @@ tracing: collector_cluster: zipkin collector_endpoint: "/api/v1/spans" collector_endpoint_version: HTTP_JSON -layered_runtime: - layers: - - name: static_layer - static_layer: - envoy.test_only.broken_in_production.enable_deprecated_v2_api: true diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 9e4ecbdeee869..a6df26363f633 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -55,6 +55,18 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "real_threads_test_helper_lib", + srcs = ["real_threads_test_helper.cc"], + hdrs = ["real_threads_test_helper.h"], + deps = [ + "utility_lib", + "//source/common/common:thread_lib", + "//source/common/event:dispatcher_lib", + "//source/common/thread_local:thread_local_lib", + ], +) + envoy_cc_test( name = "network_utility_test", srcs = ["network_utility_test.cc"], diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index be0dc2e76959a..e19a5ebb8af23 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -200,11 +200,7 @@ void TestEnvironment::initializeTestMain(char* program_name) { RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, ""); #endif -#ifdef __APPLE__ - UNREFERENCED_PARAMETER(program_name); -#else 
absl::InitializeSymbolizer(program_name); -#endif #ifdef ENVOY_HANDLE_SIGNALS // Enabled by default. Control with "bazel --define=signal_trace=disabled" diff --git a/test/test_common/real_threads_test_helper.cc b/test/test_common/real_threads_test_helper.cc new file mode 100644 index 0000000000000..3408b0c173b6e --- /dev/null +++ b/test/test_common/real_threads_test_helper.cc @@ -0,0 +1,110 @@ +#include "real_threads_test_helper.h" + +#include "absl/synchronization/barrier.h" +#include "utility.h" + +namespace Envoy { +namespace Thread { + +RealThreadsTestHelper::RealThreadsTestHelper(uint32_t num_threads) + : api_(Api::createApiForTest()), num_threads_(num_threads), + thread_factory_(api_->threadFactory()) { + // This is the same order as InstanceImpl::initialize in source/server/server.cc. + thread_dispatchers_.resize(num_threads_); + { + BlockingBarrier blocking_barrier(num_threads_ + 1); + main_thread_ = thread_factory_.createThread( + [this, &blocking_barrier]() { mainThreadFn(blocking_barrier); }); + for (uint32_t i = 0; i < num_threads_; ++i) { + threads_.emplace_back(thread_factory_.createThread( + [this, i, &blocking_barrier]() { workerThreadFn(i, blocking_barrier); })); + } + } + runOnMainBlocking([this]() { + tls_ = std::make_unique(); + tls_->registerThread(*main_dispatcher_, true); + for (Event::DispatcherPtr& dispatcher : thread_dispatchers_) { + // Worker threads must be registered from the main thread, per assert in registerThread(). 
+ tls_->registerThread(*dispatcher, false); + } + }); +} + +std::function RealThreadsTestHelper::BlockingBarrier::run(std::function f) { + return [this, f]() { + f(); + decrementCount(); + }; +} + +std::function RealThreadsTestHelper::BlockingBarrier::decrementCountFn() { + return [this] { decrementCount(); }; +} + +void RealThreadsTestHelper::shutdownThreading() { + runOnMainBlocking([this]() { + if (!tls_->isShutdown()) { + tls_->shutdownGlobalThreading(); + } + tls_->shutdownThread(); + }); +} + +void RealThreadsTestHelper::exitThreads() { + for (Event::DispatcherPtr& dispatcher : thread_dispatchers_) { + dispatcher->post([&dispatcher]() { dispatcher->exit(); }); + } + + for (ThreadPtr& thread : threads_) { + thread->join(); + } + + main_dispatcher_->post([this]() { + tls_.reset(); + main_dispatcher_->exit(); + }); + main_thread_->join(); +} + +void RealThreadsTestHelper::runOnAllWorkersBlocking(std::function work) { + absl::Barrier start_barrier(num_threads_); + BlockingBarrier blocking_barrier(num_threads_); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post(blocking_barrier.run([work, &start_barrier]() { + start_barrier.Block(); + work(); + })); + } +} + +void RealThreadsTestHelper::runOnMainBlocking(std::function work) { + BlockingBarrier blocking_barrier(1); + main_dispatcher_->post(blocking_barrier.run([work]() { work(); })); +} + +void RealThreadsTestHelper::mainDispatchBlock() { + // To ensure all stats are freed we have to wait for a few posts() to clear. + // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup. 
+ runOnMainBlocking([]() {}); +} + +void RealThreadsTestHelper::tlsBlock() { + runOnAllWorkersBlocking([]() {}); +} + +void RealThreadsTestHelper::workerThreadFn(uint32_t thread_index, + BlockingBarrier& blocking_barrier) { + thread_dispatchers_[thread_index] = + api_->allocateDispatcher(absl::StrCat("test_worker_", thread_index)); + blocking_barrier.decrementCount(); + thread_dispatchers_[thread_index]->run(Event::Dispatcher::RunType::RunUntilExit); +} + +void RealThreadsTestHelper::mainThreadFn(BlockingBarrier& blocking_barrier) { + main_dispatcher_ = api_->allocateDispatcher("test_main_thread"); + blocking_barrier.decrementCount(); + main_dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); +} + +} // namespace Thread +} // namespace Envoy diff --git a/test/test_common/real_threads_test_helper.h b/test/test_common/real_threads_test_helper.h new file mode 100644 index 0000000000000..12444777efbd4 --- /dev/null +++ b/test/test_common/real_threads_test_helper.h @@ -0,0 +1,78 @@ +#include "source/common/event/dispatcher_impl.h" +#include "source/common/thread_local/thread_local_impl.h" + +#include "absl/synchronization/blocking_counter.h" + +namespace Envoy { +namespace Thread { + +class RealThreadsTestHelper { +protected: + // Helper class to block on a number of multi-threaded operations occurring. + class BlockingBarrier { + public: + explicit BlockingBarrier(uint32_t count) : blocking_counter_(count) {} + ~BlockingBarrier() { blocking_counter_.Wait(); } + + /** + * Returns a function that first executes 'f', and then decrements the count + * toward unblocking the scope. This is intended to be used as a post() callback. + * + * @param f the function to run prior to decrementing the count. + */ + std::function run(std::function f); + + /** + * @return a function that, when run, decrements the count, intended for passing to post(). 
+ */ + std::function decrementCountFn(); + + void decrementCount() { blocking_counter_.DecrementCount(); } + + private: + absl::BlockingCounter blocking_counter_; + }; + + explicit RealThreadsTestHelper(uint32_t num_threads); + // TODO(chaoqin-li1123): Clean up threading resources from the destructor when we figure out how + // to handle different destruction orders of thread local object. + ~RealThreadsTestHelper() = default; + // Shutdown thread local instance. + void shutdownThreading(); + // Post exit signal and wait for main thread and worker threads to join. + void exitThreads(); + // Run the callback in all the workers, block until the callback has finished in all threads. + void runOnAllWorkersBlocking(std::function work); + // Run the callback in main thread, block until the callback has been executed in main thread. + void runOnMainBlocking(std::function work); + // Post an empty callback to main thread and block until all the previous callbacks have been + // executed. + void mainDispatchBlock(); + // Post an empty callback to worker threads and block until all the previous callbacks have been + // executed. + void tlsBlock(); + + ThreadLocal::Instance& tls() { return *tls_; } + + Api::Api& api() { return *api_; } + + // TODO(chaoqin-li1123): make these variables private when we figure out how to clean up the + // threading resources inside the helper class. 
+ Api::ApiPtr api_; + Event::DispatcherPtr main_dispatcher_; + std::vector thread_dispatchers_; + ThreadLocal::InstanceImplPtr tls_; + ThreadPtr main_thread_; + std::vector threads_; + +private: + void workerThreadFn(uint32_t thread_index, BlockingBarrier& blocking_barrier); + + void mainThreadFn(BlockingBarrier& blocking_barrier); + + const uint32_t num_threads_; + ThreadFactory& thread_factory_; +}; + +} // namespace Thread +} // namespace Envoy diff --git a/test/test_common/resources.h b/test/test_common/resources.h index 9dcc8ec54b6be..323bbd9971a36 100644 --- a/test/test_common/resources.h +++ b/test/test_common/resources.h @@ -16,6 +16,7 @@ class TypeUrlValues { const std::string Cluster{"type.googleapis.com/envoy.config.cluster.v3.Cluster"}; const std::string ClusterLoadAssignment{ "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"}; + const std::string LbEndpoint{"type.googleapis.com/envoy.config.endpoint.v3.LbEndpoint"}; const std::string Secret{"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret"}; const std::string RouteConfiguration{ "type.googleapis.com/envoy.config.route.v3.RouteConfiguration"}; diff --git a/test/test_listener.cc b/test/test_listener.cc index 12dba6668f813..89c8c9251e5a2 100644 --- a/test/test_listener.cc +++ b/test/test_listener.cc @@ -14,6 +14,9 @@ void TestListener::OnTestEnd(const ::testing::TestInfo& test_info) { "]: Active singletons exist. Something is leaking. 
Consider " "commenting out this assert and letting the heap checker run:\n", active_singletons)); + RELEASE_ASSERT(!Thread::MainThread::isMainThreadActive(), + absl::StrCat("MainThreadLeak: [", test_info.test_suite_name(), ".", + test_info.name(), "] test exited before main thread shut down")); } } // namespace Envoy diff --git a/test/test_runner.cc b/test/test_runner.cc index c910d4fb505af..68c0b2dc6d4b1 100644 --- a/test/test_runner.cc +++ b/test/test_runner.cc @@ -73,6 +73,8 @@ class RuntimeManagingListener : public ::testing::EmptyTestEventListener { } // namespace int TestRunner::RunTests(int argc, char** argv) { + Thread::TestThread test_thread; + ::testing::InitGoogleMock(&argc, argv); // We hold on to process_wide to provide RAII cleanup of process-wide // state. diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 9a6535515fcab..3dedd6238e57a 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -30,8 +30,6 @@ const std::string toString(envoy::type::matcher::v3::StringMatcher::MatchPattern return "suffix"; case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex: return "safe_regex"; - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex: - return "deprecated_regex"; case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kContains: return "contains"; case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::MATCH_PATTERN_NOT_SET: @@ -45,10 +43,6 @@ const std::string toString(const envoy::config::route::v3::HeaderMatcher& header case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kExactMatch: return "exact_match"; break; - case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase:: - kHiddenEnvoyDeprecatedRegexMatch: - return "regex_match"; - break; case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kSafeRegexMatch: return "safe_regex_match"; break; @@ -249,9 
+243,9 @@ bool RouterCheckTool::compareEntries(const std::string& expected_routes) { headers_finalized_ = false; auto connection_info_provider = std::make_shared( nullptr, Network::Utility::getCanonicalIpv4LoopbackAddress()); - Envoy::StreamInfo::StreamInfoImpl stream_info(Envoy::Http::Protocol::Http11, - factory_context_->dispatcher().timeSource(), - connection_info_provider); + Envoy::StreamInfo::StreamInfoImpl stream_info( + Envoy::Http::Protocol::Http11, factory_context_->mainThreadDispatcher().timeSource(), + connection_info_provider); ToolConfig tool_config = ToolConfig::create(check_config); tool_config.route_ = config_->route(*tool_config.request_headers_, stream_info, tool_config.random_value_); diff --git a/test/tools/router_check/router_check.cc b/test/tools/router_check/router_check.cc index bba8b036b229e..8ac19702134c7 100644 --- a/test/tools/router_check/router_check.cc +++ b/test/tools/router_check/router_check.cc @@ -2,12 +2,14 @@ #include #include +#include "source/common/common/thread.h" #include "source/exe/platform_impl.h" #include "test/test_common/test_runtime.h" #include "test/tools/router_check/router.h" int main(int argc, char* argv[]) { + Envoy::Thread::TestThread test_thread; Envoy::Options options(argc, argv); const bool enforce_coverage = options.failUnder() != 0.0; diff --git a/test/tools/type_whisperer/api_type_db_test.cc b/test/tools/type_whisperer/api_type_db_test.cc index 9857a40311845..07ffbc0da33da 100644 --- a/test/tools/type_whisperer/api_type_db_test.cc +++ b/test/tools/type_whisperer/api_type_db_test.cc @@ -6,6 +6,8 @@ namespace Tools { namespace TypeWhisperer { namespace { +// TODO(htuch): removal API type DB. + // Validate that ApiTypeDb::getLatestTypeInformation returns nullopt when no // type information exists. 
TEST(ApiTypeDb, GetLatestTypeInformationForTypeUnknown) { @@ -13,14 +15,6 @@ TEST(ApiTypeDb, GetLatestTypeInformationForTypeUnknown) { EXPECT_EQ(absl::nullopt, unknown_type_information); } -// Validate that ApiTypeDb::getLatestTypeInformation fetches the latest type -// information when an upgrade occurs. -TEST(ApiTypeDb, GetLatestTypeInformationForTypeKnownUpgraded) { - const auto known_type_information = ApiTypeDb::getLatestTypeInformation("envoy.type.Int64Range"); - EXPECT_EQ("envoy.type.v3.Int64Range", known_type_information->type_name_); - EXPECT_EQ("envoy/type/v3/range.proto", known_type_information->proto_path_); -} - // Validate that ApiTypeDb::getLatestTypeInformation is idempotent when no // upgrade occurs. TEST(ApiTypeDb, GetLatestTypeInformationForTypeKnownNoUpgrade) { diff --git a/tools/api_boost/README.md b/tools/api_boost/README.md deleted file mode 100644 index 6a67e445c40b6..0000000000000 --- a/tools/api_boost/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Envoy API upgrades - -This directory contains tooling to support the [Envoy API versioning -guidelines](api/API_VERSIONING.md). Envoy internally tracks the latest API -version for any given package. Since each package may have a different API -version, and we have have > 15k of API protos, we require machine assistance to -scale the upgrade process. - -We refer to the process of upgrading Envoy to the latest version of the API as -*API boosting*. This is a manual process, where a developer wanting to bump -major version at the API clock invokes: - -```console -/tools/api_boost/api_boost.py --build_api_booster --generate_compilation_database -``` - -followed by `fix_format`. The full process is still WiP, but we expect that -there will be some manual fixup required of test cases (e.g. YAML fragments) as -well. - -You will need to configure `LLVM_CONFIG` as per the [Clang Libtooling setup -guide](tools/clang_tools/README.md). - -## Status - -The API boosting tooling is still WiP. 
It is slated to land in the v3 release -(EOY 2019), at which point it should be considered ready for general consumption -by experienced developers who work on Envoy APIs. diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py deleted file mode 100755 index abee6f6e0e93a..0000000000000 --- a/tools/api_boost/api_boost.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 - -# Tool that assists in upgrading the Envoy source tree to the latest API. -# Internally, Envoy uses the latest vN or vNalpha for a given package. Envoy -# will perform a reflection based version upgrade on any older protos that are -# presented to it in configuration at ingestion time. -# -# Usage (from a clean tree): -# -# api_boost.py --generate_compilation_database --build_api_booster - -import argparse -import functools -import json -import os -import multiprocessing as mp -import pathlib -import re -import shlex -import subprocess as sp - -# Detect API #includes. -API_INCLUDE_REGEX = re.compile('#include "(envoy/.*)/[^/]+\.pb\.(validate\.)?h"') - -# Needed for CI to pass down bazel options. -BAZEL_BUILD_OPTIONS = shlex.split(os.environ.get('BAZEL_BUILD_OPTIONS', '')) - - -# Obtain the directory containing a path prefix, e.g. ./foo/bar.txt is ./foo, -# ./foo/ba is ./foo, ./foo/bar/ is ./foo/bar. -def prefix_directory(path_prefix): - return path_prefix if os.path.isdir(path_prefix) else os.path.dirname(path_prefix) - - -# Update a C++ file to the latest API. 
-def api_boost_file(llvm_include_path, debug_log, path): - print('Processing %s' % path) - if 'API_NO_BOOST_FILE' in pathlib.Path(path).read_text(): - if debug_log: - print('Not boosting %s due to API_NO_BOOST_FILE\n' % path) - return None - # Run the booster - try: - result = sp.run([ - './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster', - '--extra-arg-before=-xc++', - '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', - '--extra-arg=-Wno-old-style-cast', path - ], - capture_output=True, - check=True) - except sp.CalledProcessError as e: - print('api_booster failure for %s: %s %s' % (path, e, e.stderr.decode('utf-8'))) - raise - if debug_log: - print(result.stderr.decode('utf-8')) - - # Consume stdout containing the list of inferred API headers. - return sorted(set(result.stdout.decode('utf-8').splitlines())) - - -# Rewrite API includes to the inferred headers. Currently this is handled -# outside of the clang-ast-replacements. In theory we could either integrate -# with this or with clang-include-fixer, but it's pretty simply to handle as done -# below, we have more control over special casing as well, so ¯\_(ツ)_/¯. -def rewrite_includes(args): - path, api_includes = args - # Files with API_NO_BOOST_FILE will have None returned by api_boost_file. - if api_includes is None: - return - # We just dump the inferred API header includes at the start of the #includes - # in the file and remove all the present API header includes. This does not - # match Envoy style; we rely on later invocations of fix_format.sh to take - # care of this alignment. 
- output_lines = [] - include_lines = ['#include "%s"' % f for f in api_includes] - input_text = pathlib.Path(path).read_text() - for line in input_text.splitlines(): - if include_lines and line.startswith('#include'): - output_lines.extend(include_lines) - include_lines = None - # Exclude API includes, except for a special case related to v2alpha - # ext_authz; this is needed to include the service descriptor in the build - # and is a hack that will go away when we remove v2. - if re.match(API_INCLUDE_REGEX, line) and 'envoy/service/auth/v2alpha' not in line: - continue - output_lines.append(line) - # Rewrite file. - pathlib.Path(path).write_text('\n'.join(output_lines) + '\n') - - -# Update the Envoy source tree the latest API. -def api_boost_tree( - target_paths, - generate_compilation_database=False, - build_api_booster=False, - debug_log=False, - sequential=False): - dep_build_targets = ['//%s/...' % prefix_directory(prefix) for prefix in target_paths] - - # Optional setup of state. We need the compilation database and api_booster - # tool in place before we can start boosting. - if generate_compilation_database: - print('Building compilation database for %s' % dep_build_targets) - sp.run(['./tools/gen_compilation_database.py', '--include_headers'] + dep_build_targets, - check=True) - - if build_api_booster: - # Similar to gen_compilation_database.py, we only need the cc_library for - # setup. The long term fix for this is in - # https://github.com/bazelbuild/bazel/issues/9578. - # - # Figure out some cc_libraries that cover most of our external deps. This is - # the same logic as in gen_compilation_database.py. - query = 'kind(cc_library, {})'.format(' union '.join(dep_build_targets)) - dep_lib_build_targets = sp.check_output(['bazel', 'query', query]).decode().splitlines() - # We also need some misc. stuff such as test binaries for setup of benchmark - # dep. 
- query = 'attr("tags", "compilation_db_dep", {})'.format(' union '.join(dep_build_targets)) - dep_lib_build_targets.extend( - sp.check_output(['bazel', 'query', query]).decode().splitlines()) - extra_api_booster_args = [] - if debug_log: - extra_api_booster_args.append('--copt=-DENABLE_DEBUG_LOG') - - # Slightly easier to debug when we build api_booster on its own. - sp.run([ - 'bazel', - 'build', - '--strip=always', - '@envoy_dev//clang_tools/api_booster', - ] + BAZEL_BUILD_OPTIONS + extra_api_booster_args, - check=True) - sp.run([ - 'bazel', - 'build', - '--strip=always', - ] + BAZEL_BUILD_OPTIONS + dep_lib_build_targets, - check=True) - - # Figure out where the LLVM include path is. We need to provide this - # explicitly as the api_booster is built inside the Bazel cache and doesn't - # know about this path. - # TODO(htuch): this is fragile and depends on Clang version, should figure out - # a cleaner approach. - llvm_include_path = os.path.join( - sp.check_output([os.getenv('LLVM_CONFIG'), '--libdir']).decode().rstrip(), - 'clang/11.0.1/include') - - # Determine the files in the target dirs eligible for API boosting, based on - # known files in the compilation database. - file_paths = set([]) - for entry in json.loads(pathlib.Path('compile_commands.json').read_text()): - file_path = entry['file'] - if any(file_path.startswith(prefix) for prefix in target_paths): - file_paths.add(file_path) - # Ensure a determinstic ordering if we are going to process sequentially. - if sequential: - file_paths = sorted(file_paths) - - # The API boosting is file local, so this is trivially parallelizable, use - # multiprocessing pool with default worker pool sized to cpu_count(), since - # this is CPU bound. - try: - with mp.Pool(processes=1 if sequential else None) as p: - # We need multiple phases, to ensure that any dependency on files being modified - # in one thread on consumed transitive headers on the other thread isn't an - # issue. 
This also ensures that we complete all analysis error free before - # any mutation takes place. - # TODO(htuch): we should move to run-clang-tidy.py once the headers fixups - # are Clang-based. - api_includes = p.map( - functools.partial(api_boost_file, llvm_include_path, debug_log), file_paths) - # Apply Clang replacements before header fixups, since the replacements - # are all relative to the original file. - for prefix_dir in set(map(prefix_directory, target_paths)): - sp.run(['clang-apply-replacements', prefix_dir], check=True) - # Fixup headers. - p.map(rewrite_includes, zip(file_paths, api_includes)) - finally: - # Cleanup any stray **/*.clang-replacements.yaml. - for prefix in target_paths: - clang_replacements = pathlib.Path( - prefix_directory(prefix)).glob('**/*.clang-replacements.yaml') - for path in clang_replacements: - path.unlink() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Update Envoy tree to the latest API') - parser.add_argument('--generate_compilation_database', action='store_true') - parser.add_argument('--build_api_booster', action='store_true') - parser.add_argument('--debug_log', action='store_true') - parser.add_argument('--sequential', action='store_true') - parser.add_argument('paths', nargs='*', default=['source', 'test', 'include']) - args = parser.parse_args() - api_boost_tree( - args.paths, - generate_compilation_database=args.generate_compilation_database, - build_api_booster=args.build_api_booster, - debug_log=args.debug_log, - sequential=args.sequential) diff --git a/tools/api_boost/api_boost_test.py b/tools/api_boost/api_boost_test.py deleted file mode 100755 index a6384a7bff1db..0000000000000 --- a/tools/api_boost/api_boost_test.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python3 - -# Golden C++ source tests for API boosting. 
This is effectively a test for the -# combination of api_boost.py, the Clang libtooling-based -# tools/clang_tools/api_booster, as well as the type whisperer and API type -# database. - -import argparse -from collections import namedtuple -import logging -import os -import pathlib -import shutil -import subprocess -import sys -import tempfile - -import api_boost - -TestCase = namedtuple('TestCase', ['name', 'description']) - -# List of test in the form [(file_name, explanation)] -TESTS = list( - map( - lambda x: TestCase(*x), [ - ('deprecate', 'Deprecations'), - ('elaborated_type', 'ElaboratedTypeLoc type upgrades'), - ('using_decl', 'UsingDecl upgrades for named types'), - ('rename', 'Annotation-based renaming'), - ('decl_ref_expr', 'DeclRefExpr upgrades for named constants'), - ('no_boost_file', 'API_NO_BOOST_FILE annotations'), - ('validate', 'Validation proto header inference'), - ])) - -TESTDATA_PATH = 'tools/api_boost/testdata' - - -def diff(some_path, other_path): - result = subprocess.run(['diff', '-u', some_path, other_path], capture_output=True) - if result.returncode == 0: - return None - return result.stdout.decode('utf-8') + result.stderr.decode('utf-8') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Golden C++ source tests for api_boost.py') - parser.add_argument('tests', nargs='*') - args = parser.parse_args() - - # Accumulated error messages. - logging.basicConfig(format='%(message)s') - messages = [] - - def should_run_test(test_name): - return len(args.tests) == 0 or test_name in args.tests - - # Run API booster against test artifacts in a directory relative to workspace. - # We use a temporary copy as the API booster does in-place rewriting. - with tempfile.TemporaryDirectory(dir=pathlib.Path.cwd()) as path: - # Setup temporary tree. 
- shutil.copy(os.path.join(TESTDATA_PATH, 'BUILD'), path) - for test in TESTS: - if should_run_test(test.name): - shutil.copy(os.path.join(TESTDATA_PATH, test.name + '.cc'), path) - else: - # Place an empty file to make Bazel happy. - pathlib.Path(path, test.name + '.cc').write_text('') - - # Run API booster. - relpath_to_testdata = str(pathlib.Path(path).relative_to(pathlib.Path.cwd())) - api_boost.api_boost_tree([ - os.path.join(relpath_to_testdata, test.name) - for test in TESTS - if should_run_test(test.name) - ], - generate_compilation_database=True, - build_api_booster=True, - debug_log=True, - sequential=True) - - # Validate output against golden files. - for test in TESTS: - if should_run_test(test.name): - delta = diff( - os.path.join(TESTDATA_PATH, test.name + '.cc.gold'), - os.path.join(path, test.name + '.cc')) - if delta is not None: - messages.append( - 'Non-empty diff for %s (%s):\n%s\n' % (test.name, test.description, delta)) - - if len(messages) > 0: - logging.error('FAILED:\n{}'.format('\n'.join(messages))) - sys.exit(1) - logging.warning('PASS') diff --git a/tools/api_boost/testdata/BUILD b/tools/api_boost/testdata/BUILD deleted file mode 100644 index f3e1298ff05b8..0000000000000 --- a/tools/api_boost/testdata/BUILD +++ /dev/null @@ -1,66 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_cc_library( - name = "decl_ref_expr", - srcs = ["decl_ref_expr.cc"], - deps = [ - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/route:pkg_cc_proto", - "@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "deprecate", - srcs = ["deprecate.cc"], - deps = [ - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/route:pkg_cc_proto", - "@envoy_api//envoy/type/matcher:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "elaborated_type", - srcs = ["elaborated_type.cc"], - deps = [ 
- "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "rename", - srcs = ["rename.cc"], - deps = ["@envoy_api//envoy/api/v2/route:pkg_cc_proto"], -) - -envoy_cc_library( - name = "no_boost_file", - srcs = ["no_boost_file.cc"], - deps = ["@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto"], -) - -envoy_cc_library( - name = "using_decl", - srcs = ["using_decl.cc"], - deps = ["@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto"], -) - -envoy_cc_library( - name = "validate", - srcs = ["validate.cc"], - deps = [ - "//envoy/protobuf:message_validator_interface", - "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", - ], -) diff --git a/tools/api_boost/testdata/decl_ref_expr.cc b/tools/api_boost/testdata/decl_ref_expr.cc deleted file mode 100644 index 9b644d08abea0..0000000000000 --- a/tools/api_boost/testdata/decl_ref_expr.cc +++ /dev/null @@ -1,44 +0,0 @@ -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/route/route.pb.h" -#include "envoy/config/overload/v2alpha/overload.pb.h" - -#define API_NO_BOOST(x) x -#define BAR(x) x -#define ASSERT(x) static_cast(x) - -using envoy::config::overload::v2alpha::Trigger; - -using envoy::api::v2::Cluster; -using MutableStringClusterAccessor = std::string* (Cluster::*)(); - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v2alpha::Trigger& config) { - switch (config.trigger_oneof_case()) { - case envoy::config::overload::v2alpha::Trigger::kThreshold: - break; - default: - break; - } - switch (config.trigger_oneof_case()) { - case Trigger::kThreshold: - break; - default: - break; - } - API_NO_BOOST(envoy::api::v2::route::RouteAction) route_action; - route_action.host_rewrite(); - API_NO_BOOST(envoy::config::overload::v2alpha::Trigger) foo; - BAR(API_NO_BOOST(envoy::config::overload::v2alpha::Trigger)) bar; - BAR(envoy::config::overload::v2alpha::Trigger) baz; - 
envoy::config::overload::v2alpha::ThresholdTrigger::default_instance(); - ASSERT(envoy::config::overload::v2alpha::Trigger::kThreshold == Trigger::kThreshold); - ASSERT(Foo::kThreshold == Trigger::kThreshold); - envoy::api::v2::Cluster::LbPolicy_Name(0); - static_cast(envoy::api::v2::Cluster::MAGLEV); - MutableStringClusterAccessor foo2 = &envoy::api::v2::Cluster::mutable_name; - static_cast(foo2); - } - - using Foo = envoy::config::overload::v2alpha::Trigger; -}; diff --git a/tools/api_boost/testdata/decl_ref_expr.cc.gold b/tools/api_boost/testdata/decl_ref_expr.cc.gold deleted file mode 100644 index d7a337fc38b5b..0000000000000 --- a/tools/api_boost/testdata/decl_ref_expr.cc.gold +++ /dev/null @@ -1,45 +0,0 @@ -#include "envoy/api/v2/route/route_components.pb.h" -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/overload/v2alpha/overload.pb.h" -#include "envoy/config/overload/v3/overload.pb.h" - -#define API_NO_BOOST(x) x -#define BAR(x) x -#define ASSERT(x) static_cast(x) - -using envoy::config::overload::v3::Trigger; - -using envoy::config::cluster::v4alpha::Cluster; -using MutableStringClusterAccessor = std::string* (Cluster::*)(); - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v3::Trigger& config) { - switch (config.trigger_oneof_case()) { - case envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold: - break; - default: - break; - } - switch (config.trigger_oneof_case()) { - case Trigger::kThreshold: - break; - default: - break; - } - API_NO_BOOST(envoy::api::v2::route::RouteAction) route_action; - route_action.host_rewrite(); - API_NO_BOOST(envoy::config::overload::v2alpha::Trigger) foo; - BAR(API_NO_BOOST(envoy::config::overload::v2alpha::Trigger)) bar; - BAR(envoy::config::overload::v3::Trigger) baz; - envoy::config::overload::v3::ThresholdTrigger::default_instance(); - ASSERT(envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold == Trigger::kThreshold); - 
ASSERT(Foo::kThreshold == Trigger::kThreshold); - envoy::config::cluster::v4alpha::Cluster::LbPolicy_Name(0); - static_cast(envoy::config::cluster::v4alpha::Cluster::MAGLEV); - MutableStringClusterAccessor foo2 = &envoy::config::cluster::v4alpha::Cluster::mutable_name; - static_cast(foo2); - } - - using Foo = envoy::config::overload::v3::Trigger; -}; diff --git a/tools/api_boost/testdata/deprecate.cc b/tools/api_boost/testdata/deprecate.cc deleted file mode 100644 index d0a3f58b41b32..0000000000000 --- a/tools/api_boost/testdata/deprecate.cc +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/route/route.pb.h" -#include "envoy/type/matcher/string.pb.h" - -void test() { - envoy::api::v2::route::VirtualHost vhost; - vhost.per_filter_config(); - vhost.mutable_per_filter_config(); - static_cast(envoy::type::matcher::StringMatcher::kRegex); - static_cast(envoy::api::v2::Cluster::ORIGINAL_DST_LB); -} diff --git a/tools/api_boost/testdata/deprecate.cc.gold b/tools/api_boost/testdata/deprecate.cc.gold deleted file mode 100644 index 0158efa26d9a0..0000000000000 --- a/tools/api_boost/testdata/deprecate.cc.gold +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/route/v4alpha/route_components.pb.h" -#include "envoy/type/matcher/v4alpha/string.pb.h" - -void test() { - envoy::config::route::v4alpha::VirtualHost vhost; - vhost.hidden_envoy_deprecated_per_filter_config(); - vhost.mutable_hidden_envoy_deprecated_per_filter_config(); - static_cast(envoy::type::matcher::v4alpha::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); - static_cast(envoy::config::cluster::v4alpha::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); -} diff --git a/tools/api_boost/testdata/elaborated_type.cc b/tools/api_boost/testdata/elaborated_type.cc deleted file mode 100644 index 6a30d1e3330e2..0000000000000 --- a/tools/api_boost/testdata/elaborated_type.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include 
"envoy/api/v2/cds.pb.h" -#include "envoy/config/overload/v2alpha/overload.pb.h" - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v2alpha::ThresholdTrigger& /*config*/) {} - void someMethod(envoy::api::v2::Cluster_LbPolicy) {} - - const envoy::config::overload::v2alpha::Trigger::TriggerOneofCase case_{}; -}; diff --git a/tools/api_boost/testdata/elaborated_type.cc.gold b/tools/api_boost/testdata/elaborated_type.cc.gold deleted file mode 100644 index 442426177598e..0000000000000 --- a/tools/api_boost/testdata/elaborated_type.cc.gold +++ /dev/null @@ -1,10 +0,0 @@ -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/overload/v3/overload.pb.h" - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v3::ThresholdTrigger& /*config*/) {} - void someMethod(envoy::config::cluster::v4alpha::Cluster::LbPolicy) {} - - const envoy::config::overload::v3::Trigger::TriggerOneofCase case_{}; -}; diff --git a/tools/api_boost/testdata/no_boost_file.cc b/tools/api_boost/testdata/no_boost_file.cc deleted file mode 100644 index 82d11a26410b0..0000000000000 --- a/tools/api_boost/testdata/no_boost_file.cc +++ /dev/null @@ -1,12 +0,0 @@ -#include "envoy/config/overload/v2alpha/overload.pb.h" - -// API_NO_BOOST_FILE - -using envoy::config::overload::v2alpha::ThresholdTrigger; -using SomePtrAlias = std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/no_boost_file.cc.gold b/tools/api_boost/testdata/no_boost_file.cc.gold deleted file mode 100644 index 82d11a26410b0..0000000000000 --- a/tools/api_boost/testdata/no_boost_file.cc.gold +++ /dev/null @@ -1,12 +0,0 @@ -#include "envoy/config/overload/v2alpha/overload.pb.h" - -// API_NO_BOOST_FILE - -using envoy::config::overload::v2alpha::ThresholdTrigger; -using SomePtrAlias = 
std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/rename.cc b/tools/api_boost/testdata/rename.cc deleted file mode 100644 index 96e56b5f0b040..0000000000000 --- a/tools/api_boost/testdata/rename.cc +++ /dev/null @@ -1,7 +0,0 @@ -#include "envoy/api/v2/route/route.pb.h" - -void test() { - envoy::api::v2::route::RouteAction route_action; - route_action.host_rewrite(); - route_action.set_host_rewrite("blah"); -} diff --git a/tools/api_boost/testdata/rename.cc.gold b/tools/api_boost/testdata/rename.cc.gold deleted file mode 100644 index 124a528b05fdc..0000000000000 --- a/tools/api_boost/testdata/rename.cc.gold +++ /dev/null @@ -1,7 +0,0 @@ -#include "envoy/config/route/v4alpha/route_components.pb.h" - -void test() { - envoy::config::route::v4alpha::RouteAction route_action; - route_action.host_rewrite_literal(); - route_action.set_host_rewrite_literal("blah"); -} diff --git a/tools/api_boost/testdata/using_decl.cc b/tools/api_boost/testdata/using_decl.cc deleted file mode 100644 index 88b3f2ef44035..0000000000000 --- a/tools/api_boost/testdata/using_decl.cc +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/config/overload/v2alpha/overload.pb.h" - -using envoy::config::overload::v2alpha::ThresholdTrigger; -using ::envoy::config::overload::v2alpha::Trigger; -using SomePtrAlias = std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/using_decl.cc.gold b/tools/api_boost/testdata/using_decl.cc.gold deleted file mode 100644 index 879485050a2d3..0000000000000 --- a/tools/api_boost/testdata/using_decl.cc.gold +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/config/overload/v3/overload.pb.h" - -using envoy::config::overload::v3::ThresholdTrigger; -using 
envoy::config::overload::v3::Trigger; -using SomePtrAlias = std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/validate.cc b/tools/api_boost/testdata/validate.cc deleted file mode 100644 index 97fbd6bac35d9..0000000000000 --- a/tools/api_boost/testdata/validate.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/cluster.pb.validate.h" -#include "envoy/protobuf/message_validator.h" - -#include "source/common/protobuf/utility.h" - -void foo(Envoy::ProtobufMessage::ValidationVisitor& validator) { - envoy::api::v2::Cluster msg; - Envoy::MessageUtil::downcastAndValidate(msg, validator); -} diff --git a/tools/api_boost/testdata/validate.cc.gold b/tools/api_boost/testdata/validate.cc.gold deleted file mode 100644 index 5b991b7294e5e..0000000000000 --- a/tools/api_boost/testdata/validate.cc.gold +++ /dev/null @@ -1,10 +0,0 @@ -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/cluster/v4alpha/cluster.pb.validate.h" -#include "envoy/protobuf/message_validator.h" - -#include "source/common/protobuf/utility.h" - -void foo(Envoy::ProtobufMessage::ValidationVisitor& validator) { - envoy::config::cluster::v4alpha::Cluster msg; - Envoy::MessageUtil::downcastAndValidate(msg, validator); -} diff --git a/tools/api_proto_breaking_change_detector/BUILD b/tools/api_proto_breaking_change_detector/BUILD index bce76c323d361..69ce24fdb3029 100644 --- a/tools/api_proto_breaking_change_detector/BUILD +++ b/tools/api_proto_breaking_change_detector/BUILD @@ -1,4 +1,5 @@ load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -9,14 +10,28 @@ py_binary( ], data = [ "@com_github_bufbuild_buf//:buf", - 
"@envoy_api_canonical//:proto_breaking_change_detector_buf_config", + "@envoy_api//:proto_breaking_change_detector_buf_config", ], main = "detector.py", tags = ["manual"], deps = [ ":buf_utils", ":detector_errors", - "//tools:run_command", + ], +) + +py_binary( + name = "detector_ci", + srcs = [ + "detector_ci.py", + ], + args = ["$(location @com_github_bufbuild_buf//:buf)"], + data = [ + "@com_github_bufbuild_buf//:buf", + ], + deps = [ + ":detector", + "@envoy_repo", ], ) @@ -27,7 +42,7 @@ py_library( ], deps = [ ":detector_errors", - "//tools/base:utils", + requirement("envoy.base.utils"), ], ) @@ -50,7 +65,6 @@ py_test( tags = ["manual"], deps = [ ":detector", - "//tools:run_command", "@rules_python//python/runfiles", ], ) diff --git a/tools/api_proto_breaking_change_detector/buf_utils.py b/tools/api_proto_breaking_change_detector/buf_utils.py index 1c279e84a49c4..1bc9a96a222e6 100644 --- a/tools/api_proto_breaking_change_detector/buf_utils.py +++ b/tools/api_proto_breaking_change_detector/buf_utils.py @@ -1,9 +1,10 @@ +import subprocess from pathlib import Path from typing import List, Union, Tuple -from detector_errors import ChangeDetectorError, ChangeDetectorInitializeError -from tools.base.utils import cd_and_return -from tools.run_command import run_command +from tools.api_proto_breaking_change_detector.detector_errors import ( + ChangeDetectorError, ChangeDetectorInitializeError) +from envoy.base.utils import cd_and_return def _generate_buf_args(target_path, config_file_loc, additional_args): @@ -52,7 +53,10 @@ def pull_buf_deps( with _cd_into_config_parent(config_file_loc): buf_args = _generate_buf_args(target_path, config_file_loc, additional_args) - update_code, _, update_err = run_command(f'{buf_path} mod update') + response = subprocess.run([buf_path, "mod", "update"], + encoding="utf-8", + capture_output=True) + update_code, update_err = response.returncode, response.stderr.split("\n") # for some reason buf prints out the "downloading..." 
lines on stderr if update_code != 0: raise ChangeDetectorInitializeError( @@ -63,7 +67,7 @@ def pull_buf_deps( "buf mod update did not generate a buf.lock file (silent error... incorrect config?)" ) - run_command(' '.join([f'{buf_path} build', *buf_args])) + subprocess.run([buf_path, "build"] + buf_args, capture_output=True) def check_breaking( @@ -87,7 +91,7 @@ def check_breaking( additional_args {List[str]} -- additional arguments passed into the buf binary invocations Returns: - Tuple[int, List[str], List[str]] -- tuple of (exit status code, stdout, stderr) as provided by run_command. Note stdout/stderr are provided as string lists + Tuple[int, List[str], List[str]] -- tuple of (exit status code, stdout, stderr). Note stdout/stderr are provided as string lists """ with _cd_into_config_parent(config_file_loc): if not Path(git_path).exists(): @@ -100,6 +104,8 @@ def check_breaking( if subdir: initial_state_input += f',subdir={subdir}' - final_code, final_out, final_err = run_command( - ' '.join([buf_path, f"breaking --against {initial_state_input}", *buf_args])) - return final_code, final_out, final_err + response = subprocess.run([buf_path, "breaking", "--against", initial_state_input] + + buf_args, + encoding="utf-8", + capture_output=True) + return response.returncode, response.stdout.split("\n"), response.stderr.split("\n") diff --git a/tools/api_proto_breaking_change_detector/detector.py b/tools/api_proto_breaking_change_detector/detector.py index c5a66be94214c..8a4273cb76e22 100644 --- a/tools/api_proto_breaking_change_detector/detector.py +++ b/tools/api_proto_breaking_change_detector/detector.py @@ -16,8 +16,8 @@ from pathlib import Path from typing import List -from buf_utils import check_breaking, pull_buf_deps -from detector_errors import ChangeDetectorError +from tools.api_proto_breaking_change_detector.buf_utils import check_breaking, pull_buf_deps +from tools.api_proto_breaking_change_detector.detector_errors import ChangeDetectorError class 
ProtoBreakingChangeDetector(object): diff --git a/tools/api_proto_breaking_change_detector/detector_ci.py b/tools/api_proto_breaking_change_detector/detector_ci.py index 84278d6d4b075..4460423c68bfa 100755 --- a/tools/api_proto_breaking_change_detector/detector_ci.py +++ b/tools/api_proto_breaking_change_detector/detector_ci.py @@ -1,13 +1,16 @@ #!/usr/bin/env python3 import argparse +import os import sys from pathlib import Path -from detector import BufWrapper +from tools.api_proto_breaking_change_detector.detector import BufWrapper -API_DIR = Path("api").resolve() -GIT_PATH = Path.cwd().joinpath(".git") +import envoy_repo + +API_DIR = Path(envoy_repo.PATH).joinpath("api") +GIT_PATH = Path(envoy_repo.PATH).joinpath(".git") CONFIG_FILE_LOC = Path(API_DIR, "buf.yaml") @@ -39,6 +42,7 @@ def detect_breaking_changes_git(path_to_buf, ref): parser.add_argument( 'git_ref', type=str, help='git reference to check against for breaking changes') args = parser.parse_args() - - exit_status = detect_breaking_changes_git(args.buf_path, args.git_ref) + buf_path = os.path.abspath(args.buf_path) + os.chdir(envoy_repo.PATH) + exit_status = detect_breaking_changes_git(buf_path, args.git_ref) sys.exit(exit_status) diff --git a/tools/api_proto_breaking_change_detector/detector_ci.sh b/tools/api_proto_breaking_change_detector/detector_ci.sh deleted file mode 100755 index 7f0ec3271e1e5..0000000000000 --- a/tools/api_proto_breaking_change_detector/detector_ci.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -tools="$(dirname "$(dirname "$(realpath "$0")")")" -root=$(realpath "$tools/..") - -cd "$root" || exit 1 -# to satisfy dependency on run_command (as done in tools/code_format/check_format_test_helper.sh) -export PYTHONPATH="$root" -./tools/api_proto_breaking_change_detector/detector_ci.py "$@" diff --git a/tools/api_proto_breaking_change_detector/detector_test.py b/tools/api_proto_breaking_change_detector/detector_test.py index 1566bf2d500d8..0417f21c3fd97 100644 --- 
a/tools/api_proto_breaking_change_detector/detector_test.py +++ b/tools/api_proto_breaking_change_detector/detector_test.py @@ -7,6 +7,7 @@ and ensure that tool behavior is consistent across dependency updates. """ +import subprocess import tempfile import unittest from pathlib import Path @@ -14,10 +15,9 @@ from rules_python.python.runfiles import runfiles -from buf_utils import pull_buf_deps -from detector import BufWrapper -from tools.base.utils import cd_and_return -from tools.run_command import run_command +from tools.api_proto_breaking_change_detector.buf_utils import pull_buf_deps +from tools.api_proto_breaking_change_detector.detector import BufWrapper +from envoy.base.utils import cd_and_return class BreakingChangeDetectorTests(object): @@ -89,8 +89,8 @@ class BufTests(TestAllowedChanges, TestBreakingChanges, unittest.TestCase): @classmethod def _run_command_print_error(cls, cmd): - code, out, err = run_command(cmd) - out, err = '\n'.join(out), '\n'.join(err) + response = subprocess.run([cmd], shell=True, capture_output=True, encoding="utf-8") + code, out, err = response.returncode, response.stdout, response.stderr if code != 0: raise Exception( f"Error running command {cmd}\nExit code: {code} | stdout: {out} | stderr: {err}") @@ -109,8 +109,7 @@ def setUpClass(cls): copytree(testdata_path, cls._temp_dir.name, dirs_exist_ok=True) # copy in buf config - bazel_buf_config_loc = Path.cwd().joinpath( - "external", "envoy_api_canonical", "buf.yaml") + bazel_buf_config_loc = Path.cwd().joinpath("external", "envoy_api", "buf.yaml") copyfile(bazel_buf_config_loc, cls._config_file_loc) # pull buf dependencies and initialize git repo with test data files diff --git a/tools/api_proto_plugin/annotations.py b/tools/api_proto_plugin/annotations.py index 88cd2695bf15d..e878cf8bd50bd 100644 --- a/tools/api_proto_plugin/annotations.py +++ b/tools/api_proto_plugin/annotations.py @@ -16,9 +16,6 @@ # envoy.filters.network.http_connection_manager. 
EXTENSION_ANNOTATION = 'extension' -# Used to mark something as alpha, excluding it from the threat model. -ALPHA_ANNOTATION = 'alpha' - # Not implemented yet annotation on leading comments, leading to hiding of # field. NOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide' @@ -36,7 +33,6 @@ VALID_ANNOTATIONS = set([ DOC_TITLE_ANNOTATION, EXTENSION_ANNOTATION, - ALPHA_ANNOTATION, EXTENSION_CATEGORY_ANNOTATION, NOT_IMPLEMENTED_HIDE_ANNOTATION, NEXT_FREE_FIELD_ANNOTATION, diff --git a/tools/api_proto_plugin/utils.py b/tools/api_proto_plugin/utils.py index 91007f5f54043..440b252effb47 100644 --- a/tools/api_proto_plugin/utils.py +++ b/tools/api_proto_plugin/utils.py @@ -11,8 +11,8 @@ def proto_file_canonical_from_label(label): A string with the path, e.g. for @envoy_api//envoy/type/matcher:metadata.proto this would be envoy/type/matcher/matcher.proto. """ - assert (label.startswith('@envoy_api_canonical//')) - return label[len('@envoy_api_canonical//'):].replace(':', '/') + assert (label.startswith('@envoy_api//')) + return label[len('@envoy_api//'):].replace(':', '/') def bazel_bin_path_for_output_artifact(label, suffix, root=''): @@ -24,9 +24,9 @@ def bazel_bin_path_for_output_artifact(label, suffix, root=''): root: location of bazel-bin/, if not specified, PWD. Returns: - Path in bazel-bin/external/envoy_api_canonical for label output with given suffix. + Path in bazel-bin/external/envoy_api for label output with given suffix. 
""" proto_file_path = proto_file_canonical_from_label(label) return os.path.join( - root, 'bazel-bin/external/envoy_api_canonical', os.path.dirname(proto_file_path), 'pkg', + root, 'bazel-bin/external/envoy_api', os.path.dirname(proto_file_path), 'pkg', proto_file_path + suffix) diff --git a/tools/base/BUILD b/tools/base/BUILD index 8de9977da9dcd..1169240338686 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -1,43 +1,17 @@ +load("@rules_python//python:defs.bzl", "py_binary") load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_library") licenses(["notice"]) # Apache 2 envoy_package() -exports_files([ - "base_command.py", -]) - -envoy_py_library( - "tools.base.aio", - deps = [ - requirement("aio.functional"), - ], -) - -envoy_py_library( - "tools.base.checker", - deps = [ - ":runner", - ], -) - -envoy_py_library( - "tools.base.runner", - deps = [ - requirement("colorama"), - requirement("coloredlogs"), - requirement("frozendict"), - requirement("verboselogs"), - ], -) - -envoy_py_library( - "tools.base.utils", +py_binary( + name = "bazel_query", + srcs = ["bazel_query.py"], + main = "bazel_query.py", deps = [ - requirement("pyyaml"), - requirement("setuptools"), + "@envoy_repo", + requirement("envoy.base.utils"), ], ) diff --git a/tools/base/aio.py b/tools/base/aio.py deleted file mode 100644 index a07787c31133f..0000000000000 --- a/tools/base/aio.py +++ /dev/null @@ -1,509 +0,0 @@ -import asyncio -import inspect -import os -import subprocess -import types -from concurrent.futures import Executor, ProcessPoolExecutor -from functools import cached_property, partial -from typing import ( - Any, AsyncGenerator, AsyncIterable, AsyncIterator, Awaitable, Iterable, Iterator, List, - Optional, Union) - -from aio.functional import async_property - - -class ConcurrentError(Exception): - """Raised when given inputs/awaitables are incorrect""" - pass - - -class 
ConcurrentIteratorError(ConcurrentError): - """Raised when iteration of provided awaitables fails""" - pass - - -class ConcurrentExecutionError(ConcurrentError): - """Raised when execution of a provided awaitable fails""" - pass - - -class async_subprocess: # noqa: N801 - - @classmethod - async def parallel( - cls, commands: Iterable[Iterable[str]], - **kwargs) -> AsyncGenerator[subprocess.CompletedProcess, Iterable[Iterable[str]]]: - """Run external subprocesses in parallel - - Yields `subprocess.CompletedProcess` results as they are completed. - - Example usage: - - ``` - import asyncio - - from tools.base.aio import async_subprocess - - async def run_system_commands(commands): - async for result in async_subprocess.parallel(commands, capture_output=True): - print(result.returncode) - print(result.stdout) - print(result.stderr) - - asyncio.run(run_system_commands(["whoami"] for i in range(0, 5))) - ``` - """ - # Using a `ProcessPoolExecutor` or `ThreadPoolExecutor` here is somewhat - # arbitrary as subproc will spawn a new process regardless. - # Either way - using a custom executor of either type gives considerable speedup, - # most likely due to the number of workers allocated. - # In my testing, `ProcessPoolExecutor` gave a very small speedup over a large - # number of tasks, despite any additional overhead of creating the executor. - # Without `max_workers` set `ProcessPoolExecutor` defaults to the number of cpus - # on the machine. 
- with ProcessPoolExecutor() as pool: - futures = asyncio.as_completed( - tuple( - asyncio.ensure_future(cls.run(command, executor=pool, **kwargs)) - for command in commands)) - for result in futures: - yield await result - - @classmethod - async def run( - cls, - *args, - loop: Optional[asyncio.AbstractEventLoop] = None, - executor: Optional[Executor] = None, - **kwargs) -> subprocess.CompletedProcess: - """This is an asyncio wrapper for `subprocess.run` - - It can be used in a similar way to `subprocess.run` but its non-blocking to - the main thread. - - Example usage: - - ``` - import asyncio - - from tools.base.aio import async_subprocess - - async def run_system_command(): - result = await async_subprocess.run(["whoami"], capture_output=True) - print(result.returncode) - print(result.stdout) - print(result.stderr) - - asyncio.run(run_system_command()) - - ``` - - By default it will spawn the process using the main event loop, and that loop's - default (`ThreadPool`) executor. - - You can provide the loop and/or the executor to change this behaviour. - """ - loop = loop or asyncio.get_running_loop() - return await loop.run_in_executor(executor, partial(subprocess.run, *args, **kwargs)) - - -_sentinel = object() - - -class concurrent: # noqa: N801 - """This utility provides very similar functionality to - `asyncio.as_completed` in that it runs coroutines in concurrent, yielding the - results as they are available. - - There are a couple of differences: - - - `coros` can be any `iterables` including sync/async `generators` - - `limit` can be supplied to specify the maximum number of concurrent tasks - - Setting `limit` to `-1` will make all tasks run in concurrent. - - The default is `number of cores + 4` to a maximum of `32`. - - For network tasks it might make sense to set the concurrency `limit` lower - than the default, if, for example, opening many concurrent connections will trigger - rate-limiting or soak bandwidth. 
- - If an error is raised while trying to iterate the provided coroutines, the - error is wrapped in an `ConcurrentIteratorError` and is raised immediately. - - In this case, no further handling occurs, and `yield_exceptions` has no - effect. - - Any errors raised while trying to create or run tasks are wrapped in - `ConcurrentError`. - - Any errors raised during task execution are wrapped in - `ConcurrentExecutionError`. - - If you specify `yield_exceptions` as `True` then the wrapped errors will be - yielded in the results. - - If `yield_exceptions` is False (the default), then the wrapped error will be - raised immediately. - - If you use any kind of `Generator` or `AsyncGenerator` to produce the - awaitables, and `yield_exceptions` is `False`, in the event that an error - occurs, it is your responsibility to `close` remaining awaitables that you - might have created but which have not already been fired. - - This utility is mostly useful for concurrentizing io-bound (as opposed to - cpu-bound) tasks. 
- - Example usage: - - ``` - import random - - from tools.base import aio - - async def task_to_run(i): - print(f"{i} starting") - wait = random.random() * 10 - await asyncio.sleep(wait) - return i, wait - - async def run(coros): - async for (i, wait) in aio.concurrent(coros, limit=3): - print(f"{i} waited {wait}") - - def provider(): - for i in range(0, 10): - yield task_to_run(i) - - asyncio.run(run(provider())) - ``` - """ - - def __init__( - self, - coros: Union[types.AsyncGeneratorType, AsyncIterable[Awaitable], - AsyncIterator[Awaitable], types.GeneratorType, Iterator[Awaitable], - Iterable[Awaitable]], - yield_exceptions: Optional[bool] = False, - limit: Optional[int] = None): - self._coros = coros - self._limit = limit - self._running: List[asyncio.Task] = [] - self.yield_exceptions = yield_exceptions - - def __aiter__(self) -> AsyncIterator: - """Start a coroutine task to process the submit queue, and return - an async generator to deliver results back as they arrive - """ - self.submit_task = asyncio.create_task(self.submit()) - return self.output() - - @property - def active(self) -> bool: - """Checks whether the iterator is active, either because it - hasn't finished submitting or because there are still tasks running - """ - return self.submitting or self.running - - @property - def closed(self) -> bool: - """If an unhandled error occurs, the generator is closed and no further - processing should happen - """ - return self.closing_lock.locked() - - @cached_property - def closing_lock(self) -> asyncio.Lock: - """Flag to indicate whether the generator has been closed""" - return asyncio.Lock() - - @cached_property - def consumes_async(self) -> bool: - """Provided coros iterable is some kind of async provider""" - return isinstance(self._coros, (types.AsyncGeneratorType, AsyncIterator, AsyncIterable)) - - @cached_property - def consumes_generator(self) -> bool: - """Provided coros iterable is some kind of generator""" - return isinstance(self._coros, 
(types.AsyncGeneratorType, types.GeneratorType)) - - @async_property - async def coros(self) -> AsyncIterator[Union[ConcurrentIteratorError, Awaitable]]: - """An async iterator of the provided coroutines""" - coros = self.iter_coros() - try: - async for coro in coros: - yield coro - except GeneratorExit: - # If we exit before we finish generating we land here (ie error was raised) - # In this case we need to tell the (possibly) async generating provider to - # also close. - try: - await coros.aclose() # type:ignore - finally: - # Suppress errors closing the provider generator - # This can raise a further `GeneratorExit` but it will stop providing. - return - - @property - def default_limit(self) -> int: - """Default is to use cpu+4 to a max of 32 coroutines""" - # This reflects the default for asyncio's `ThreadPoolExecutor`, this is a fairly - # arbitrary number to use, but it seems like a reasonable default. - return min(32, (os.cpu_count() or 0) + 4) - - @cached_property - def limit(self) -> int: - """The limit for concurrent coroutines""" - return self._limit or self.default_limit - - @cached_property - def nolimit(self) -> bool: - """Flag indicating no limit to concurrency""" - return self.limit == -1 - - @cached_property - def out(self) -> asyncio.Queue: - """Queue of results to yield back""" - return asyncio.Queue() - - @property - def running(self) -> bool: - """Flag to indicate whether any tasks are running""" - return not self.running_queue.empty() - - @cached_property - def running_queue(self) -> asyncio.Queue: - """Queue which is incremented/decremented as tasks begin/end - - This is for tracking when there are no longer any tasks running. - - A queue is used here as opposed to other synchronization primitives, as - it allows us to get the size and emptiness. - - The queue values are `None`. 
- """ - return asyncio.Queue() - - @cached_property - def running_tasks(self) -> List[asyncio.Task]: - """Currently running asyncio tasks""" - return self._running - - @cached_property - def sem(self) -> asyncio.Semaphore: - """A sem lock to limit the number of concurrent tasks""" - return asyncio.Semaphore(self.limit) - - @cached_property - def submission_lock(self) -> asyncio.Lock: - """Submission lock to indicate when submission is complete""" - return asyncio.Lock() - - @property - def submitting(self) -> bool: - """Flag to indicate whether we are still submitting coroutines""" - return self.submission_lock.locked() - - async def cancel(self) -> None: - """Stop the submission queue, cancel running tasks, close pending coroutines. - - This is triggered when an unhandled error occurs and the queue should - stop processing and bail. - """ - # Kitchen is closed - await self.close() - - # No more waiting - if not self.nolimit: - self.sem.release() - - # Cancel tasks - await self.cancel_tasks() - - # Close pending coroutines - await self.close_coros() - - # let the submission queue die - await self.submit_task - - async def cancel_tasks(self) -> None: - """Cancel any running tasks""" - - for running in self.running_tasks: - running.cancel() - try: - await running - finally: - # ignore errors, we are dying anyway - continue - - async def close(self) -> None: - """Close the generator, prevent any further processing""" - if not self.closed: - await self.closing_lock.acquire() - - async def close_coros(self) -> None: - """Close provided coroutines (unless the provided coros is a generator)""" - if self.consumes_generator: - # If we have a generator, dont blow/create/wait upon any more items - return - - async for coro in self.iter_coros(): - try: - # this could be an `aio.ConcurrentError` and not have a - # `close` method, but as we are asking for forgiveness anyway, - # no point in looking before we leap. 
- coro.close() # type:ignore - finally: - # ignore errors, we are dying anyway - continue - - async def create_task(self, coro: Awaitable) -> None: - """Create an asyncio task from the coroutine, and remember it""" - task = asyncio.create_task(self.task(coro)) - self.remember_task(task) - self.running_queue.put_nowait(None) - - async def exit_on_completion(self) -> None: - """Send the exit signal to the output queue""" - if not self.active and not self.closed: - await self.out.put(_sentinel) - - def forget_task(self, task: asyncio.Task) -> None: - """Task? what task?""" - if self.closed: - # If we are closing, don't remove, as this has been triggered - # by cancellation. - return - self.running_tasks.remove(task) - - async def iter_coros(self) -> AsyncIterator[Union[ConcurrentIteratorError, Awaitable]]: - """Iterate provided coros either synchronously or asynchronously, - yielding the awaitables asynchoronously. - """ - try: - if self.consumes_async: - async for coro in self._coros: # type:ignore - yield coro - else: - for coro in self._coros: # type:ignore - yield coro - except BaseException as e: - # Catch all errors iterating (other errors are caught elsewhere) - # If iterating raises, wrap the error and send it to `submit` and - # and `output` to close the queues. - yield ConcurrentIteratorError(e) - - async def on_task_complete(self, result: Any, decrement: Optional[bool] = True) -> None: - """Output the result, release the sem lock, decrement the running - count, and notify output queue if complete. - """ - if self.closed: - # Results can come back after the queue has closed as they are - # cancelled. - # In that case, nothing further to do. 
- return - - # Give result to output - await self.out.put(result) - - if not self.nolimit: - # Release the sem.lock - self.sem.release() - if decrement: - # Decrement the running_queue if it was incremented - self.running_queue.get_nowait() - # Exit if nothing left to do - await self.exit_on_completion() - - async def output(self) -> AsyncIterator: - """Asynchronously yield results as they become available""" - while True: - # Wait for some output - result = await self.out.get() - if result is _sentinel: - # All done! - await self.close() - break - elif self.should_error(result): - # Raise an error and bail! - await self.cancel() - raise result - yield result - - async def ready(self) -> bool: - """Wait for the sem.lock and indicate availability in the submission - queue - """ - if self.closed: - return False - if not self.nolimit: - await self.sem.acquire() - # We check before and after acquiring the sem.lock to see whether - # we are `closed` as these events can be separated in - # time/procedure. 
- if self.closed: - return False - return True - - def remember_task(self, task: asyncio.Task) -> None: - """Remember a scheduled asyncio task, in case it needs to be - cancelled - """ - self.running_tasks.append(task) - task.add_done_callback(self.forget_task) - - def should_error(self, result: Any) -> bool: - """Check a result type and whether it should raise an error""" - return ( - isinstance(result, ConcurrentIteratorError) - or (isinstance(result, ConcurrentError) and not self.yield_exceptions)) - - async def submit(self) -> None: - """Process the iterator of coroutines as a submission queue""" - await self.submission_lock.acquire() - async for coro in self.coros: - if isinstance(coro, ConcurrentIteratorError): - # Iteration error, exit now - await self.out.put(coro) - break - if not await self.ready(): - # Queue is closing, get out of here - try: - # Ensure the last coro to be produced/generated is closed, - # as it will not be scheduled as a task, and in the case - # of generators it wont be closed any other way. 
- coro.close() - finally: - # ignore all coro closing errors, we are dying - break - # Check the supplied coro is awaitable - try: - self.validate_coro(coro) - except ConcurrentError as e: - await self.on_task_complete(e, decrement=False) - continue - # All good, create a task - await self.create_task(coro) - self.submission_lock.release() - # If cleanup of the submission queue has taken longer than processing - # we need to manually close - await self.exit_on_completion() - - async def task(self, coro: Awaitable) -> None: - """Task wrapper to catch/wrap errors and output awaited results""" - try: - result = await coro - except BaseException as e: - result = ConcurrentExecutionError(e) - finally: - await self.on_task_complete(result) - - def validate_coro(self, coro: Awaitable) -> None: - """Validate that a provided coroutine is actually awaitable""" - if not inspect.isawaitable(coro): - raise ConcurrentError(f"Provided input was not a coroutine: {coro}") - - if inspect.getcoroutinestate(coro) != inspect.CORO_CREATED: - raise ConcurrentError(f"Provided coroutine has already been fired: {coro}") diff --git a/tools/base/base_command.py b/tools/base/base_command.py deleted file mode 100644 index 41cd5675da16f..0000000000000 --- a/tools/base/base_command.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python3 - -import sys - -from __UPSTREAM_PACKAGE__ import main as upstream_main - - -def main(*args: str) -> int: - return upstream_main(*args) - - -if __name__ == "__main__": - sys.exit(main(*sys.argv[1:])) diff --git a/tools/base/bazel_query.py b/tools/base/bazel_query.py new file mode 100644 index 0000000000000..48825838de45e --- /dev/null +++ b/tools/base/bazel_query.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +"""Envoy Bazel query implementation. + +This module can be used either as a `py_binary` or a `py_library`. + +cli usage (outputs to json): + +```console +$ bazel run //tools/base:bazel_query "deps(source/...)" | jq "." 
+``` + +python usage: + +```python +from tools.base.bazel_query import query + +result = query("deps(source/...)") +``` + +NB: This allows running queries that do not define scope and cannot be +run as genqueries. **It should not therefore be used in build rules**. +""" + +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.base.utils +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling +# + +import json +import pathlib +import sys +from functools import cached_property + +import abstracts + +from envoy.base.utils import ABazelQuery + +import envoy_repo + + +@abstracts.implementer(ABazelQuery) +class EnvoyBazelQuery: + + @cached_property + def path(self) -> pathlib.Path: + return pathlib.Path(envoy_repo.PATH) + + +query = EnvoyBazelQuery().query + + +def main(*args): + print(json.dumps(query(*args[0:1]))) + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) + +__all__ = ("query",) diff --git a/tools/base/checker.py b/tools/base/checker.py deleted file mode 100644 index 4feed282d2ec1..0000000000000 --- a/tools/base/checker.py +++ /dev/null @@ -1,378 +0,0 @@ -import argparse -import asyncio -import logging -import pathlib -from functools import cached_property -from typing import Any, Iterable, Optional, Sequence, Tuple, Type - -from tools.base import runner - - -class BaseChecker(runner.Runner): - """Runs check methods prefixed with `check_` and named in `self.checks` - - Check methods should call the `self.warn`, `self.error` or `self.succeed` - depending upon the outcome of the checks. - """ - _active_check = "" - checks: Tuple[str, ...] 
= () - - def __init__(self, *args): - super().__init__(*args) - self.success = {} - self.errors = {} - self.warnings = {} - - @property - def active_check(self) -> str: - return self._active_check - - @property - def diff(self) -> bool: - """Flag to determine whether the checker should print diffs to the console""" - return self.args.diff - - @property - def error_count(self) -> int: - """Count of all errors found""" - return sum(len(e) for e in self.errors.values()) - - @property - def exiting(self): - return "exiting" in self.errors - - @property - def failed(self) -> dict: - """Dictionary of errors per check""" - return dict((k, (len(v))) for k, v in self.errors.items()) - - @property - def fix(self) -> bool: - """Flag to determine whether the checker should attempt to fix found problems""" - return self.args.fix - - @property - def has_failed(self) -> bool: - """Shows whether there are any failures""" - # add logic for warn/error - return bool(self.failed or self.warned) - - @cached_property - def path(self) -> pathlib.Path: - """The "path" - usually Envoy src dir. 
This is used for finding configs for the tooling and should be a dir""" - try: - path = pathlib.Path(self.args.path or self.args.paths[0]) - except IndexError: - raise self.parser.error( - "Missing path: `path` must be set either as an arg or with --path") - if not path.is_dir(): - raise self.parser.error( - "Incorrect path: `path` must be a directory, set either as first arg or with --path" - ) - return path - - @property - def paths(self) -> list: - """List of paths to apply checks to""" - return self.args.paths or [self.path] - - @property - def show_summary(self) -> bool: - """Show a summary at the end or not""" - return bool( - not self.exiting and (self.args.summary or self.error_count or self.warning_count)) - - @property - def status(self) -> dict: - """Dictionary showing current success/warnings/errors""" - return dict( - success=self.success_count, - errors=self.error_count, - warnings=self.warning_count, - failed=self.failed, - warned=self.warned, - succeeded=self.succeeded) - - @property - def succeeded(self) -> dict: - """Dictionary of successful checks grouped by check type""" - return dict((k, (len(v))) for k, v in self.success.items()) - - @property - def success_count(self) -> int: - """Current count of successful checks""" - return sum(len(e) for e in self.success.values()) - - @cached_property - def summary(self) -> "CheckerSummary": - """Instance of the checker's summary class""" - return self.summary_class(self) - - @property - def summary_class(self) -> Type["CheckerSummary"]: - """Checker's summary class""" - return CheckerSummary - - @property - def warned(self) -> dict: - """Dictionary of warned checks grouped by check type""" - return dict((k, (len(v))) for k, v in self.warnings.items()) - - @property - def warning_count(self) -> int: - """Current count of warned checks""" - return sum(len(e) for e in self.warnings.values()) - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - """Add arguments to the arg parser""" - 
super().add_arguments(parser) - parser.add_argument( - "--fix", action="store_true", default=False, help="Attempt to fix in place") - parser.add_argument( - "--diff", - action="store_true", - default=False, - help="Display a diff in the console where available") - parser.add_argument( - "--warning", - "-w", - choices=["warn", "error"], - default="warn", - help="Handle warnings as warnings or errors") - parser.add_argument( - "--summary", action="store_true", default=False, help="Show a summary of check runs") - parser.add_argument( - "--summary-errors", - type=int, - default=5, - help="Number of errors to show in the summary, -1 shows all") - parser.add_argument( - "--summary-warnings", - type=int, - default=5, - help="Number of warnings to show in the summary, -1 shows all") - parser.add_argument( - "--check", - "-c", - choices=self.checks, - nargs="*", - help="Specify which checks to run, can be specified for multiple checks") - for check in self.checks: - parser.add_argument( - f"--config-{check}", default="", help=f"Custom configuration for the {check} check") - parser.add_argument( - "--path", - "-p", - default=None, - help= - "Path to the test root (usually Envoy source dir). If not specified the first path of paths is used" - ) - parser.add_argument( - "paths", - nargs="*", - help= - "Paths to check. 
At least one path must be specified, or the `path` argument should be provided" - ) - - def error( - self, - name: str, - errors: Optional[Iterable[str]], - log: bool = True, - log_type: str = "error") -> int: - """Record (and log) errors for a check type""" - if not errors: - return 0 - self.errors[name] = self.errors.get(name, []) - self.errors[name].extend(errors) - if not log: - return 1 - for message in errors: - getattr(self.log, log_type)(f"[{name}] {message}") - return 1 - - def exit(self) -> int: - self.log.handlers[0].setLevel(logging.FATAL) - self.stdout.handlers[0].setLevel(logging.FATAL) - return self.error("exiting", ["Keyboard exit"], log_type="fatal") - - def get_checks(self) -> Sequence[str]: - """Get list of checks for this checker class filtered according to user args""" - return ( - self.checks if not self.args.check else - [check for check in self.args.check if check in self.checks]) - - def on_check_begin(self, check: str) -> Any: - self._active_check = check - self.log.notice(f"[{check}] Running check") - - def on_check_run(self, check: str) -> Any: - """Callback hook called after each check run""" - self._active_check = "" - if self.exiting: - return - elif check in self.errors: - self.log.error(f"[{check}] Check failed") - elif check in self.warnings: - self.log.warning(f"[{check}] Check has warnings") - else: - self.log.success(f"[{check}] Check completed successfully") - - def on_checks_begin(self) -> Any: - """Callback hook called before all checks""" - pass - - def on_checks_complete(self) -> Any: - """Callback hook called after all checks have run, and returning the final outcome of a checks_run""" - if self.show_summary: - self.summary.print_summary() - return 1 if self.has_failed else 0 - - @runner.cleansup - def run(self) -> int: - """Run all configured checks and return the sum of their error counts""" - checks = self.get_checks() - try: - self.on_checks_begin() - for check in checks: - self.on_check_begin(check) - getattr(self, 
f"check_{check}")() - self.on_check_run(check) - except KeyboardInterrupt as e: - self.exit() - finally: - result = self.on_checks_complete() - return result - - def succeed(self, name: str, success: list, log: bool = True) -> None: - """Record (and log) success for a check type""" - self.success[name] = self.success.get(name, []) - self.success[name].extend(success) - if not log: - return - for message in success: - self.log.success(f"[{name}] {message}") - - def warn(self, name: str, warnings: list, log: bool = True) -> None: - """Record (and log) warnings for a check type""" - self.warnings[name] = self.warnings.get(name, []) - self.warnings[name].extend(warnings) - if not log: - return - for message in warnings: - self.log.warning(f"[{name}] {message}") - - -class Checker(BaseChecker): - - def on_check_begin(self, check: str) -> None: - super().on_check_begin(check) - - def on_check_run(self, check: str) -> None: - super().on_check_run(check) - - def on_checks_begin(self) -> None: - super().on_checks_complete() - - def on_checks_complete(self) -> int: - return super().on_checks_complete() - - -class BazelChecker(runner.BazelRunner, Checker): - pass - - -class CheckerSummary(object): - - def __init__(self, checker: BaseChecker): - self.checker = checker - - @property - def max_errors(self) -> int: - """Maximum errors to display in summary""" - return self.checker.args.summary_errors - - @property - def max_warnings(self) -> int: - """Maximum warnings to display in summary""" - return self.checker.args.summary_warnings - - def print_failed(self, problem_type): - _out = [] - _max = getattr(self, f"max_{problem_type}") - for check, problems in getattr(self.checker, problem_type).items(): - _msg = f"{self.checker.name} {check}" - _max = (min(len(problems), _max) if _max >= 0 else len(problems)) - msg = ( - f"{_msg}: (showing first {_max} of {len(problems)})" if - (len(problems) > _max and _max > 0) else (f"{_msg}:" if _max != 0 else _msg)) - 
_out.extend(self._section(msg, problems[:_max])) - if not _out: - return - output = ( - self.checker.log.warning if problem_type == "warnings" else self.checker.log.error) - output("\n".join(_out + [""])) - - def print_status(self) -> None: - """Print summary status to stderr""" - if self.checker.errors: - self.checker.log.error(f"{self.checker.status}") - elif self.checker.warnings: - self.checker.log.warning(f"{self.checker.status}") - else: - self.checker.log.info(f"{self.checker.status}") - - def print_summary(self) -> None: - """Write summary to stderr""" - self.print_failed("warnings") - self.print_failed("errors") - self.print_status() - - def _section(self, message: str, lines: list = None) -> list: - """Print a summary section""" - section = ["Summary", "-" * 80, f"{message}"] - if lines: - section += [line.split("\n")[0] for line in lines] - return section - - -class AsyncChecker(BaseChecker): - """Async version of the Checker class for use with asyncio""" - - async def _run(self) -> int: - checks = self.get_checks() - try: - await self.on_checks_begin() - for check in checks: - await self.on_check_begin(check) - await getattr(self, f"check_{check}")() - await self.on_check_run(check) - finally: - if self.exiting: - result = 1 - else: - result = await self.on_checks_complete() - return result - - @runner.cleansup - def run(self) -> int: - try: - return asyncio.get_event_loop().run_until_complete(self._run()) - except KeyboardInterrupt as e: - # This needs to be outside the loop to catch the a keyboard interrupt - # This means that a new loop has to be created to cleanup - result = self.exit() - result = asyncio.get_event_loop().run_until_complete(self.on_checks_complete()) - return result - - async def on_check_begin(self, check: str) -> None: - super().on_check_begin(check) - - async def on_check_run(self, check: str) -> None: - super().on_check_run(check) - - async def on_checks_begin(self) -> None: - super().on_checks_begin() - - async def 
on_checks_complete(self) -> int: - return super().on_checks_complete() diff --git a/tools/base/envoy_python.bzl b/tools/base/envoy_python.bzl index 3af2b50323834..550ff901768de 100644 --- a/tools/base/envoy_python.bzl +++ b/tools/base/envoy_python.bzl @@ -71,58 +71,3 @@ def envoy_py_binary( if test: envoy_py_test(name, package, visibility, envoy_prefix = envoy_prefix) - -def envoy_py_script( - name, - entry_point, - deps = [], - data = [], - visibility = ["//visibility:public"], - envoy_prefix = "@envoy"): - """This generates a `py_binary` from an entry_point in a python package - - Currently, the actual entrypoint callable is hard-coded to `main`. - - For example, if you wish to make use of a `console_script` in an upstream - package that resolves as `envoy.code_format.python.command.main` from a - package named `envoy.code_format.python`, you can use this macro as - follows: - - ```skylark - - envoy_py_script( - name = "tools.code_format.python", - entry_point = "envoy.code_format.python.command", - deps = [requirement("envoy.code_format.python")], - ``` - - You will then be able to use the console script from bazel. 
- - Separate args to be passed to the console_script with `--`, eg: - - ```console - - $ bazel run //tools/code_format:python -- -h - ``` - - """ - py_file = "%s.py" % name.split(".")[-1] - output = "$(@D)/%s" % py_file - template_rule = "%s//tools/base:base_command.py" % envoy_prefix - template = "$(location %s)" % template_rule - - native.genrule( - name = "py_script_%s" % py_file, - cmd = "sed s/__UPSTREAM_PACKAGE__/%s/ %s > \"%s\"" % (entry_point, template, output), - tools = [template_rule], - outs = [py_file], - ) - - envoy_py_binary( - name = name, - deps = deps, - data = data, - visibility = visibility, - envoy_prefix = envoy_prefix, - test = False, - ) diff --git a/tools/base/requirements.in b/tools/base/requirements.in index 8ec1dbb9be567..cec88af56453c 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -1,10 +1,17 @@ aio.functional +aio.subprocess +aio.tasks colorama coloredlogs coverage +envoy.base.checker +envoy.base.runner envoy.base.utils +envoy.code_format.python_check>=0.0.4 +envoy.dependency.pip_check>=0.0.4 envoy.distribution.release envoy.distribution.verify +envoy.docs.sphinx-runner>=0.0.3 envoy.gpg.sign flake8 frozendict @@ -19,6 +26,7 @@ pytest-cov pytest-patches pyyaml setuptools +slackclient sphinx sphinxcontrib-httpdomain sphinxcontrib-serializinghtml diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 8d18996322e79..cc7c4fd129fdb 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --allow-unsafe --generate-hashes tools/base/requirements.in +# pip-compile --allow-unsafe --generate-hashes requirements.in # abstracts==0.0.12 \ --hash=sha256:acc01ff56c8a05fb88150dff62e295f9071fc33388c42f1dfc2787a8d1c755ff @@ -10,21 +10,30 @@ abstracts==0.0.12 \ # aio.functional # envoy.abstract.command # envoy.base.utils + # envoy.code-format.python-check + # envoy.dependency.pip-check # 
envoy.github.abstract # envoy.github.release aio.functional==0.0.9 \ --hash=sha256:824a997a394ad891bc9f403426babc13c9d0d1f4d1708c38e77d6aecae1cab1d # via - # -r tools/base/requirements.in + # -r requirements.in # aio.tasks # envoy.github.abstract # envoy.github.release aio.stream==0.0.2 \ --hash=sha256:6f5baaff48f6319db134cd56c06ccf89db1f7c5f67a26382e081efc96f2f675d # via envoy.github.release +aio.subprocess==0.0.4 \ + --hash=sha256:fd504a7c02423c40fde19ad87b62932b9eaa091f5a22d26b89b452059a728750 + # via + # -r requirements.in + # envoy.code-format.python-check aio.tasks==0.0.4 \ --hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc # via + # -r requirements.in + # envoy.code-format.python-check # envoy.github.abstract # envoy.github.release aiodocker==0.21.0 \ @@ -80,6 +89,7 @@ aiohttp==3.7.4.post0 \ # aiodocker # envoy.github.abstract # envoy.github.release + # slackclient alabaster==0.7.12 \ --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 @@ -162,68 +172,53 @@ charset-normalizer==2.0.4 \ colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r tools/base/requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner coloredlogs==15.0.1 \ --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.runner -coverage==5.5 \ - --hash=sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c \ - --hash=sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6 \ - --hash=sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45 \ - 
--hash=sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a \ - --hash=sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03 \ - --hash=sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529 \ - --hash=sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a \ - --hash=sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a \ - --hash=sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2 \ - --hash=sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6 \ - --hash=sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759 \ - --hash=sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53 \ - --hash=sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a \ - --hash=sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4 \ - --hash=sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff \ - --hash=sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502 \ - --hash=sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793 \ - --hash=sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb \ - --hash=sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905 \ - --hash=sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821 \ - --hash=sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b \ - --hash=sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81 \ - --hash=sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0 \ - --hash=sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b \ - --hash=sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3 \ - --hash=sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184 \ - --hash=sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701 \ - 
--hash=sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a \ - --hash=sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82 \ - --hash=sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638 \ - --hash=sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5 \ - --hash=sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083 \ - --hash=sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6 \ - --hash=sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90 \ - --hash=sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465 \ - --hash=sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a \ - --hash=sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3 \ - --hash=sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e \ - --hash=sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066 \ - --hash=sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf \ - --hash=sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b \ - --hash=sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae \ - --hash=sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669 \ - --hash=sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873 \ - --hash=sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b \ - --hash=sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6 \ - --hash=sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb \ - --hash=sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160 \ - --hash=sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c \ - --hash=sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079 \ - --hash=sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d \ - 
--hash=sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6 - # via - # -r tools/base/requirements.in +coverage==6.0 \ + --hash=sha256:08fd55d2e00dac4c18a2fa26281076035ec86e764acdc198b9185ce749ada58f \ + --hash=sha256:11ce082eb0f7c2bbfe96f6c8bcc3a339daac57de4dc0f3186069ec5c58da911c \ + --hash=sha256:17983f6ccc47f4864fd16d20ff677782b23d1207bf222d10e4d676e4636b0872 \ + --hash=sha256:25df2bc53a954ba2ccf230fa274d1de341f6aa633d857d75e5731365f7181749 \ + --hash=sha256:274a612f67f931307706b60700f1e4cf80e1d79dff6c282fc9301e4565e78724 \ + --hash=sha256:3dfb23cc180b674a11a559183dff9655beb9da03088f3fe3c4f3a6d200c86f05 \ + --hash=sha256:43bada49697a62ffa0283c7f01bbc76aac562c37d4bb6c45d56dd008d841194e \ + --hash=sha256:4865dc4a7a566147cbdc2b2f033a6cccc99a7dcc89995137765c384f6c73110b \ + --hash=sha256:581fddd2f883379bd5af51da9233e0396b6519f3d3eeae4fb88867473be6d56e \ + --hash=sha256:5c191e01b23e760338f19d8ba2470c0dad44c8b45e41ac043b2db84efc62f695 \ + --hash=sha256:6e216e4021c934246c308fd3e0d739d9fa8a3f4ea414f584ab90ef9c1592f282 \ + --hash=sha256:72f8c99f1527c5a8ee77c890ea810e26b39fd0b4c2dffc062e20a05b2cca60ef \ + --hash=sha256:7593a49300489d064ebb6c58539f52cbbc4a2e6a4385de5e92cae1563f88a425 \ + --hash=sha256:7844a8c6a0fee401edbf578713c2473e020759267c40261b294036f9d3eb6a2d \ + --hash=sha256:7af2f8e7bb54ace984de790e897f858e88068d8fbc46c9490b7c19c59cf51822 \ + --hash=sha256:7dbda34e8e26bd86606ba8a9c13ccb114802e01758a3d0a75652ffc59a573220 \ + --hash=sha256:82b58d37c47d93a171be9b5744bcc96a0012cbf53d5622b29a49e6be2097edd7 \ + --hash=sha256:8305e14112efb74d0b5fec4df6e41cafde615c2392a7e51c84013cafe945842c \ + --hash=sha256:8426fec5ad5a6e8217921716b504e9b6e1166dc147e8443b4855e329db686282 \ + --hash=sha256:88f1810eb942e7063d051d87aaaa113eb5fd5a7fd2cda03a972de57695b8bb1a \ + --hash=sha256:8da0c4a26a831b392deaba5fdd0cd7838d173b47ce2ec3d0f37be630cb09ef6e \ + --hash=sha256:a9dbfcbc56d8de5580483cf2caff6a59c64d3e88836cbe5fb5c20c05c29a8808 \ + 
--hash=sha256:aa5d4d43fa18cc9d0c6e02a83de0b9729b5451a9066574bd276481474f0a53ab \ + --hash=sha256:adb0f4c3c8ba8104378518a1954cbf3d891a22c13fd0e0bf135391835f44f288 \ + --hash=sha256:b4ee5815c776dfa3958ba71c7cd4cdd8eb40d79358a18352feb19562fe4408c4 \ + --hash=sha256:b5dd5ae0a9cd55d71f1335c331e9625382239b8cede818fb62d8d2702336dbf8 \ + --hash=sha256:b78dd3eeb8f5ff26d2113c41836bac04a9ea91be54c346826b54a373133c8c53 \ + --hash=sha256:bea681309bdd88dd1283a8ba834632c43da376d9bce05820826090aad80c0126 \ + --hash=sha256:befb5ffa9faabef6dadc42622c73de168001425258f0b7e402a2934574e7a04b \ + --hash=sha256:d795a2c92fe8cb31f6e9cd627ee4f39b64eb66bf47d89d8fcf7cb3d17031c887 \ + --hash=sha256:d82cbef1220703ce56822be7fbddb40736fc1a928ac893472df8aff7421ae0aa \ + --hash=sha256:e63490e8a6675cee7a71393ee074586f7eeaf0e9341afd006c5d6f7eec7c16d7 \ + --hash=sha256:e735ab8547d8a1fe8e58dd765d6f27ac539b395f52160d767b7189f379f9be7a \ + --hash=sha256:fa816e97cfe1f691423078dffa39a18106c176f28008db017b3ce3e947c34aa5 \ + --hash=sha256:fff04bfefb879edcf616f1ce5ea6f4a693b5976bdc5e163f8464f349c25b59f0 + # via + # -r requirements.in # pytest-cov cryptography==3.4.8 \ --hash=sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e \ @@ -252,6 +247,7 @@ docutils==0.16 \ --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc # via + # envoy.docs.sphinx-runner # sphinx # sphinx-rtd-theme # sphinx-tabs @@ -263,34 +259,52 @@ envoy.abstract.command==0.0.3 \ envoy.base.checker==0.0.2 \ --hash=sha256:2ac81efa20fd01fff644ff7dc7fadeac1c3e4dbb6210881ac7a7919ec0e048d8 # via + # -r requirements.in + # envoy.code-format.python-check + # envoy.dependency.pip-check # envoy.distribution.distrotest # envoy.distribution.verify envoy.base.runner==0.0.4 \ --hash=sha256:4eeb2b661f1f0c402df4425852be554a8a83ef5d338bfae69ddcb9b90755379e # via + # -r requirements.in # envoy.base.checker # envoy.distribution.release + 
# envoy.docs.sphinx-runner # envoy.github.abstract # envoy.gpg.sign envoy.base.utils==0.0.8 \ --hash=sha256:b82e18ab0535207b7136d6980239c9350f7113fa5da7dda781bcb6ad1e05b3ab # via - # -r tools/base/requirements.in + # -r requirements.in + # envoy.code-format.python-check + # envoy.dependency.pip-check # envoy.distribution.distrotest + # envoy.docs.sphinx-runner # envoy.github.release # envoy.gpg.sign +envoy.code-format.python-check==0.0.4 \ + --hash=sha256:5e166102d1f873f0c14640bcef87b46147cbad1cb68888c977acfde7fce96e04 + # via -r requirements.in +envoy.dependency.pip-check==0.0.4 \ + --hash=sha256:3213d77959f65c3c97e9b5d74cb14c02bc02dae64bac2e7c3cb829a2f4e5e40e + # via -r requirements.in envoy.distribution.distrotest==0.0.3 \ --hash=sha256:c094adbd959eb1336f93afc00aedb7ee4e68e8252e2365be816a6f9ede8a3de7 # via envoy.distribution.verify envoy.distribution.release==0.0.4 \ --hash=sha256:41037e0488f0593ce5173739fe0cd1b45a4775f5a47738b85d9d04024ca241a2 - # via -r tools/base/requirements.in + # via -r requirements.in envoy.distribution.verify==0.0.2 \ --hash=sha256:ae59134085de50203edf51c243dbf3301cbe5550db29f0ec6f9ea1c3b82fee1c - # via -r tools/base/requirements.in + # via -r requirements.in envoy.docker.utils==0.0.2 \ --hash=sha256:a12cb57f0b6e204d646cbf94f927b3a8f5a27ed15f60d0576176584ec16a4b76 # via envoy.distribution.distrotest +envoy.docs.sphinx-runner==0.0.3 \ + --hash=sha256:6da14a524cb1ede4c3d3f07c3bf2659405e8fe9191af9041979046c54b0ed35f \ + --hash=sha256:b497c0ed9756e91a9b5f6fbd3bef637b3b5b8597af040c9f89d8a7a414dbecec + # via -r requirements.in envoy.github.abstract==0.0.16 \ --hash=sha256:badf04104492fb6b37ba2163f2b225132ed04aba680beb218e7c7d918564f8ee # via @@ -304,23 +318,24 @@ envoy.gpg.identity==0.0.2 \ # via envoy.gpg.sign envoy.gpg.sign==0.0.3 \ --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 - # via -r tools/base/requirements.in -flake8-polyfill==1.0.2 \ - 
--hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ - --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda - # via pep8-naming + # via -r requirements.in flake8==3.9.2 \ --hash=sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b \ --hash=sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907 # via - # -r tools/base/requirements.in + # -r requirements.in + # envoy.code-format.python-check # flake8-polyfill # pep8-naming +flake8-polyfill==1.0.2 \ + --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ + --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda + # via pep8-naming frozendict==2.0.6 \ --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.runner gidgethub==5.0.1 \ --hash=sha256:3efbd6998600254ec7a2869318bd3ffde38edc3a0d37be0c14bc46b45947b682 \ @@ -332,10 +347,10 @@ gitdb==4.0.7 \ --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 # via gitpython -gitpython==3.1.18 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 - # via -r tools/base/requirements.in +gitpython==3.1.24 \ + --hash=sha256:dc0a7f2f697657acc8d7f89033e8b1ea94dd90356b2983bca89dc8d2ab3cc647 \ + --hash=sha256:df83fdf5e684fef7c6ee2c02fc68a5ceb7e7e759d08b694088d0cacb4eba59e5 + # via -r requirements.in humanfriendly==9.2 \ --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ --hash=sha256:f7dba53ac7935fd0b4a2fc9a29e316ddd9ea135fb3052d3d0279d10c18ff9c48 @@ -354,11 +369,11 @@ iniconfig==1.1.1 \ 
--hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 # via pytest -jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 +jinja2==3.0.2 \ + --hash=sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45 \ + --hash=sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c # via - # -r tools/base/requirements.in + # -r requirements.in # sphinx markupsafe==2.0.1 \ --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ @@ -471,7 +486,9 @@ packaging==21.0 \ pep8-naming==0.12.1 \ --hash=sha256:4a8daeaeb33cfcde779309fc0c9c0a68a3bbe2ad8a8308b763c5068f86eb9f37 \ --hash=sha256:bb2455947757d162aa4cad55dba4ce029005cd1692f2899a21d51d8630ca7841 - # via -r tools/base/requirements.in + # via + # -r requirements.in + # envoy.code-format.python-check pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 @@ -495,7 +512,7 @@ pyflakes==2.3.1 \ pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r tools/base/requirements.in + # via -r requirements.in pygments==2.10.0 \ --hash=sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380 \ --hash=sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6 @@ -534,26 +551,26 @@ pyparsing==2.4.7 \ # via packaging pyreadline==2.1 \ --hash=sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1 - # via -r tools/base/requirements.in -pytest-asyncio==0.15.1 \ - --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ - 
--hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea - # via -r tools/base/requirements.in -pytest-cov==2.12.1 \ - --hash=sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a \ - --hash=sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7 - # via -r tools/base/requirements.in -pytest-patches==0.0.3 \ - --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a - # via -r tools/base/requirements.in + # via -r requirements.in pytest==6.2.5 \ --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 # via - # -r tools/base/requirements.in + # -r requirements.in # pytest-asyncio # pytest-cov # pytest-patches +pytest-asyncio==0.15.1 \ + --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ + --hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea + # via -r requirements.in +pytest-cov==3.0.0 \ + --hash=sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6 \ + --hash=sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470 + # via -r requirements.in +pytest-patches==0.0.3 \ + --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a + # via -r requirements.in python-gnupg==0.4.7 \ --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae @@ -593,7 +610,7 @@ pyyaml==5.4.1 \ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.utils requests==2.26.0 \ --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ @@ -607,6 +624,10 @@ six==1.16.0 \ # via # pynacl # sphinxcontrib-httpdomain 
+slackclient==2.9.3 \ + --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965 \ + --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 + # via -r requirements.in smmap==4.0.0 \ --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 @@ -615,28 +636,35 @@ snowballstemmer==2.1.0 \ --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 # via sphinx +sphinx==4.2.0 \ + --hash=sha256:94078db9184491e15bce0a56d9186e0aec95f16ac20b12d00e06d4e36f1058a6 \ + --hash=sha256:98a535c62a4fcfcc362528592f69b26f7caec587d32cd55688db580be0287ae0 + # via + # -r requirements.in + # envoy.docs.sphinx-runner + # sphinx-copybutton + # sphinx-rtd-theme + # sphinx-tabs + # sphinxcontrib-httpdomain + # sphinxext-rediraffe sphinx-copybutton==0.4.0 \ --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 - # via -r tools/base/requirements.in -sphinx-rtd-theme==0.5.2 \ - --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ - --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f - # via -r tools/base/requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner +sphinx-rtd-theme==1.0.0 \ + --hash=sha256:4d35a56f4508cfee4c4fb604373ede6feae2a306731d533f409ef5c3496fdbd8 \ + --hash=sha256:eec6d497e4c2195fa0e8b2016b337532b8a699a68bcb22a512870e16925c6a5c + # via + # -r requirements.in + # envoy.docs.sphinx-runner sphinx-tabs==3.2.0 \ --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 - # via -r tools/base/requirements.in -sphinx==4.1.2 \ - 
--hash=sha256:3092d929cd807926d846018f2ace47ba2f3b671b309c7a89cd3306e80c826b13 \ - --hash=sha256:46d52c6cee13fec44744b8c01ed692c18a640f6910a725cbb938bc36e8d64544 # via - # -r tools/base/requirements.in - # sphinx-copybutton - # sphinx-rtd-theme - # sphinx-tabs - # sphinxcontrib-httpdomain - # sphinxext-rediraffe + # -r requirements.in + # envoy.docs.sphinx-runner sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 @@ -649,10 +677,12 @@ sphinxcontrib-htmlhelp==2.0.0 \ --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \ --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2 # via sphinx -sphinxcontrib-httpdomain==1.7.0 \ - --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ - --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 - # via -r tools/base/requirements.in +sphinxcontrib-httpdomain==1.8.0 \ + --hash=sha256:2059cfabd0cca8fcc3455cc8ffad92f0915a7d3bb03bfddba078a6a0f35beec5 \ + --hash=sha256:a3396d6350728d574f52458b400f0ac848f8b6913bd41fed95d391d3ffbbade3 + # via + # -r requirements.in + # envoy.docs.sphinx-runner sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 @@ -665,18 +695,21 @@ sphinxcontrib-serializinghtml==1.1.5 \ --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 # via - # -r tools/base/requirements.in + # -r requirements.in + # envoy.docs.sphinx-runner # sphinx sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c - # via -r 
tools/base/requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f # via + # coverage # pytest - # pytest-cov trycast==0.3.0 \ --hash=sha256:1b7b4c0d4b0d674770a53f34a762e52a6cd6879eb251ab21625602699920080d \ --hash=sha256:687185b812e8d1c45f2ba841e8de7bdcdee0695dcf3464f206800505d4c65f26 @@ -688,6 +721,7 @@ typing-extensions==3.10.0.2 \ # via # aiodocker # aiohttp + # gitpython uritemplate==3.0.1 \ --hash=sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f \ --hash=sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae @@ -700,7 +734,7 @@ verboselogs==1.7 \ --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.runner # envoy.github.abstract # envoy.github.release @@ -710,7 +744,9 @@ wrapt==1.12.1 \ yapf==0.31.0 \ --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \ --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e - # via -r tools/base/requirements.in + # via + # -r requirements.in + # envoy.code-format.python-check yarl==1.6.3 \ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ @@ -752,9 +788,9 @@ yarl==1.6.3 \ # via aiohttp # The following packages are considered to be unsafe in a requirements file: -setuptools==58.0.3 \ - --hash=sha256:1ceadf3ea9a821ef305505db995f2e21550ea62500900164278c4b23109204f3 \ - --hash=sha256:5e4c36f55012a46c1b3e4b67a8236d1d73856a90fc7b3207d29bedb7d2bac417 +setuptools==58.2.0 \ + --hash=sha256:2551203ae6955b9876741a26ab3e767bb3242dafe86a32a749ea0d78b6792f11 \ + 
--hash=sha256:2c55bdb85d5bb460bd2e3b12052b677879cffcf46c0c688f2e5bf51d36001145 # via - # -r tools/base/requirements.in + # -r requirements.in # sphinx diff --git a/tools/base/runner.py b/tools/base/runner.py deleted file mode 100644 index 87d5577b370a0..0000000000000 --- a/tools/base/runner.py +++ /dev/null @@ -1,300 +0,0 @@ -# -# Generic runner class for use by cli implementations -# - -import argparse -import inspect -import logging -import pathlib -import subprocess -import sys -import tempfile -from functools import cached_property, wraps -from typing import Callable, Optional, Tuple, Type, Union - -from frozendict import frozendict - -import coloredlogs # type:ignore -import verboselogs # type:ignore - -LOG_LEVELS = (("debug", logging.DEBUG), ("info", logging.INFO), ("warn", logging.WARN), - ("error", logging.ERROR)) -LOG_FIELD_STYLES: frozendict = frozendict( - name=frozendict(color="blue"), levelname=frozendict(color="cyan", bold=True)) -LOG_FMT = "%(name)s %(levelname)s %(message)s" -LOG_LEVEL_STYLES: frozendict = frozendict( - critical=frozendict(bold=True, color="red"), - debug=frozendict(color="green"), - error=frozendict(color="red", bold=True), - info=frozendict(color="white", bold=True), - notice=frozendict(color="magenta", bold=True), - spam=frozendict(color="green", faint=True), - success=frozendict(bold=True, color="green"), - verbose=frozendict(color="blue"), - warning=frozendict(color="yellow", bold=True)) - - -def catches(errors: Union[Type[Exception], Tuple[Type[Exception], ...]]) -> Callable: - """Method decorator to catch specified errors - - logs and returns 1 for sys.exit if error/s are caught - - can be used as so: - - ```python - - class MyRunner(runner.Runner): - - @runner.catches((MyError, MyOtherError)) - def run(self): - self.myrun() - ``` - - Can work with `async` methods too. 
- """ - - def wrapper(fun: Callable) -> Callable: - - @wraps(fun) - def wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return fun(self, *args, **kwargs) - except errors as e: - self.log.error(str(e) or repr(e)) - return 1 - - @wraps(fun) - async def async_wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return await fun(self, *args, **kwargs) - except errors as e: - self.log.error(str(e) or repr(e)) - return 1 - - wrapped_fun = async_wrapped if inspect.iscoroutinefunction(fun) else wrapped - - # mypy doesnt trust `@wraps` to give back a `__wrapped__` object so we - # need to code defensively here - wrapping = getattr(wrapped_fun, "__wrapped__", None) - if wrapping: - setattr(wrapping, "__catches__", errors) - return wrapped_fun - - return wrapper - - -def cleansup(fun) -> Callable: - """Method decorator to call `.cleanup()` after run. - - Can work with `sync` and `async` methods. - """ - - @wraps(fun) - def wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return fun(self, *args, **kwargs) - finally: - self.cleanup() - - @wraps(fun) - async def async_wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return await fun(self, *args, **kwargs) - finally: - await self.cleanup() - - # mypy doesnt trust `@wraps` to give back a `__wrapped__` object so we - # need to code defensively here - wrapped_fun = async_wrapped if inspect.iscoroutinefunction(fun) else wrapped - wrapping = getattr(wrapped_fun, "__wrapped__", None) - if wrapping: - setattr(wrapping, "__cleansup__", True) - return wrapped_fun - - -class BazelRunError(Exception): - pass - - -class LogFilter(logging.Filter): - - def filter(self, rec): - return rec.levelno in (logging.DEBUG, logging.INFO) - - -class BaseRunner: - - def __init__(self, *args): - self._args = args - - @cached_property - def args(self) -> argparse.Namespace: - """Parsed args""" - return self.parser.parse_known_args(self._args)[0] - - @cached_property - def extra_args(self) -> list: - """Unparsed args""" - 
return self.parser.parse_known_args(self._args)[1] - - @property - def log_field_styles(self): - return LOG_FIELD_STYLES - - @property - def log_fmt(self): - return LOG_FMT - - @property - def log_level_styles(self): - return LOG_LEVEL_STYLES - - @cached_property - def log(self) -> verboselogs.VerboseLogger: - """Instantiated logger""" - verboselogs.install() - logger = logging.getLogger(self.name) - logger.setLevel(self.log_level) - coloredlogs.install( - field_styles=self.log_field_styles, - level_styles=self.log_level_styles, - fmt=self.log_fmt, - level='DEBUG', - logger=logger, - isatty=True) - return logger - - @cached_property - def log_level(self) -> int: - """Log level parsed from args""" - return dict(LOG_LEVELS)[self.args.log_level] - - @property - def name(self) -> str: - """Name of the runner""" - return self.__class__.__name__ - - @cached_property - def parser(self) -> argparse.ArgumentParser: - """Argparse parser""" - parser = argparse.ArgumentParser(allow_abbrev=False) - self.add_arguments(parser) - return parser - - @cached_property - def path(self) -> pathlib.Path: - return pathlib.Path(".") - - @cached_property - def stdout(self) -> logging.Logger: - """Log to stdout""" - logger = logging.getLogger("stdout") - logger.setLevel(self.log_level) - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(logging.Formatter("%(message)s")) - logger.addHandler(handler) - return logger - - @cached_property - def tempdir(self) -> tempfile.TemporaryDirectory: - """If you call this property, remember to call `.cleanup` - - For `run` methods this should be done by decorating the method with - `@runner.cleansup` - """ - if self._missing_cleanup: - self.log.warning( - "Tempdir created but instance has a `run` method which is not decorated with `@runner.cleansup`" - ) - return tempfile.TemporaryDirectory() - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - """Override this method to add custom arguments to the arg parser""" - 
parser.add_argument( - "--log-level", - "-l", - choices=[level[0] for level in LOG_LEVELS], - default="info", - help="Log level to display") - - @property - def _missing_cleanup(self) -> bool: - run_fun = getattr(self, "run", None) - return bool( - run_fun - and not getattr(getattr(run_fun, "__wrapped__", object()), "__cleansup__", False)) - - def _cleanup_tempdir(self) -> None: - if "tempdir" in self.__dict__: - self.tempdir.cleanup() - del self.__dict__["tempdir"] - - -class Runner(BaseRunner): - - def cleanup(self) -> None: - self._cleanup_tempdir() - - -class AsyncRunner(BaseRunner): - - async def cleanup(self) -> None: - self._cleanup_tempdir() - - -class ForkingAdapter: - - def __init__(self, context: Runner): - self.context = context - - def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess: - return self.subproc_run(*args, **kwargs) - - def subproc_run( - self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess: - """Fork a subprocess, using self.context.path as the cwd by default""" - kwargs["cwd"] = kwargs.get("cwd", self.context.path) - return subprocess.run(*args, capture_output=capture_output, **kwargs) - - -class BazelAdapter: - - def __init__(self, context: "ForkingRunner"): - self.context = context - - def query(self, query: str) -> list: - """Run a bazel query and return stdout as list of lines""" - resp = self.context.subproc_run(["bazel", "query", f"'{query}'"]) - if resp.returncode: - raise BazelRunError(f"Bazel query failed: {resp}") - return resp.stdout.decode("utf-8").split("\n") - - def run( - self, - target: str, - *args, - capture_output: bool = False, - cwd: str = "", - raises: bool = True) -> subprocess.CompletedProcess: - """Run a bazel target and return the subprocess response""" - args = (("--",) + args) if args else args - bazel_args = ("bazel", "run", target) + args - resp = self.context.subproc_run( - bazel_args, capture_output=capture_output, cwd=cwd or self.context.path) - if resp.returncode 
and raises: - raise BazelRunError(f"Bazel run failed: {resp}") - return resp - - -class ForkingRunner(Runner): - - @cached_property - def subproc_run(self) -> ForkingAdapter: - return ForkingAdapter(self) - - -class BazelRunner(ForkingRunner): - - @cached_property - def bazel(self) -> BazelAdapter: - return BazelAdapter(self) diff --git a/tools/base/tests/test_aio.py b/tools/base/tests/test_aio.py deleted file mode 100644 index e80747f5d4a8a..0000000000000 --- a/tools/base/tests/test_aio.py +++ /dev/null @@ -1,1289 +0,0 @@ - -import asyncio -import gc -import inspect -import types -from typing import AsyncIterator, AsyncIterable -from unittest.mock import AsyncMock, MagicMock, PropertyMock - -import pytest - -from tools.base import aio - - -@pytest.mark.asyncio -async def test_async_subprocess_parallel(patches): - patched = patches( - "asyncio", - "ProcessPoolExecutor", - "async_subprocess.run", - prefix="tools.base.aio") - procs = [f"PROC{i}" for i in range(0, 3)] - kwargs = {f"KEY{i}": f"VALUE{i}" for i in range(0, 3)} - - async def async_result(result): - return result - - with patched as (m_asyncio, m_future, m_run): - returned = [f"RESULT{i}" for i in range(0, 5)] - m_asyncio.as_completed.return_value = [ - async_result(result) for result in returned] - - results = [] - async for result in aio.async_subprocess.parallel(procs, **kwargs): - results.append(result) - - assert results == returned - assert ( - list(m_future.call_args) - == [(), {}]) - assert ( - list(m_asyncio.as_completed.call_args) - == [(tuple(m_asyncio.ensure_future.return_value for i in range(0, len(procs))), ), {}]) - kwargs["executor"] = m_future.return_value.__enter__.return_value - assert ( - list(list(c) for c in m_run.call_args_list) - == [[(proc,), kwargs] for proc in procs]) - assert ( - list(list(c) for c in m_asyncio.ensure_future.call_args_list) - == [[(m_run.return_value,), {}] for proc in procs]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("loop", [True, False]) 
-@pytest.mark.parametrize("executor", [None, "EXECUTOR"]) -async def test_async_subprocess_run(patches, loop, executor): - patched = patches( - "asyncio", - "partial", - "subprocess", - prefix="tools.base.aio") - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"KEY{i}": f"VALUE{i}" for i in range(0, 3)} - - if loop: - kwargs["loop"] = AsyncMock() - - if executor: - kwargs["executor"] = executor - - with patched as (m_asyncio, m_partial, m_subproc): - m_asyncio.get_running_loop.return_value = AsyncMock() - if loop: - m_loop = kwargs["loop"] - else: - m_loop = m_asyncio.get_running_loop.return_value - - assert ( - await aio.async_subprocess.run(*args, **kwargs) - == m_loop.run_in_executor.return_value) - - if loop: - assert not m_asyncio.get_running_loop.called - - kwargs.pop("executor", None) - kwargs.pop("loop", None) - - assert ( - list(m_partial.call_args) - == [(m_subproc.run, ) + tuple(args), kwargs]) - assert ( - list(m_loop.run_in_executor.call_args) - == [(executor, m_partial.return_value), {}]) - - -@pytest.mark.parametrize("limit", ["XX", None, "", 0, -1, 73]) -@pytest.mark.parametrize("yield_exceptions", [None, True, False]) -def test_aio_concurrent_constructor(limit, yield_exceptions): - kwargs = {} - if limit == "XX": - limit = None - else: - kwargs["limit"] = limit - if yield_exceptions is not None: - kwargs["yield_exceptions"] = yield_exceptions - - concurrent = aio.concurrent(["CORO"], **kwargs) - assert concurrent._coros == ["CORO"] - assert concurrent._limit == limit - assert ( - concurrent.yield_exceptions - == (False - if yield_exceptions is None - else yield_exceptions)) - assert concurrent._running == [] - - assert concurrent.running_tasks is concurrent._running - assert "running_tasks" in concurrent.__dict__ - - -def test_aio_concurrent_dunder_aiter(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - "concurrent.output", - ("concurrent.submit", dict(new_callable=MagicMock)), - prefix="tools.base.aio") - - 
with patched as (m_asyncio, m_output, m_submit): - assert concurrent.__aiter__() == m_output.return_value - - assert concurrent.submit_task == m_asyncio.create_task.return_value - assert ( - list(m_submit.call_args) - == [(), {}]) - assert ( - list(m_asyncio.create_task.call_args) - == [(m_submit.return_value, ), {}]) - - -@pytest.mark.parametrize("running", [True, False]) -@pytest.mark.parametrize("submitting", [True, False]) -def test_aio_concurrent_active(patches, running, submitting): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - ("concurrent.submitting", dict(new_callable=PropertyMock)), - ("concurrent.running", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_submit, m_run): - m_submit.return_value = submitting - m_run.return_value = running - assert concurrent.active == (submitting or running) - - assert "active" not in concurrent.__dict__ - - -def test_aio_concurrent_closing_lock(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.closing_lock == m_asyncio.Lock.return_value - - assert ( - list(m_asyncio.Lock.call_args) - == [(), {}]) - assert "closing_lock" in concurrent.__dict__ - - - -@pytest.mark.parametrize("locked", [True, False]) -def test_aio_concurrent_closed(patches, locked): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closing_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_closing_lock, ): - m_closing_lock.return_value.locked.return_value = locked - assert concurrent.closed == locked - - assert "closed" not in concurrent.__dict__ - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [None, BaseException, GeneratorExit]) -@pytest.mark.parametrize("close_raises", [None, BaseException]) -async def test_aio_concurrent_coros(patches, raises, close_raises): - concurrent = 
aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.iter_coros", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - results = [] - return_coros = [f"CORO{i}" for i in range(0, 3)] - m_aclose = AsyncMock() - if close_raises: - m_aclose.side_effect = close_raises() - - class Coros: - aclose = m_aclose - - def __call__(self): - return self - - async def __aiter__(self): - if raises: - raise raises("AN ERROR OCCURRED") - for coro in return_coros: - yield coro - - with patched as (m_coros, ): - coros = Coros() - m_coros.return_value = coros - if raises == BaseException: - with pytest.raises(BaseException): - async for coro in concurrent.coros: - pass - else: - async for coro in concurrent.coros: - results.append(coro) - - if raises == GeneratorExit: - assert ( - list(coros.aclose.call_args) - == [(), {}]) - return - - assert not coros.aclose.called - assert "coros" not in concurrent.__dict__ - - if raises: - return - assert results == return_coros - - -def test_aio_concurrent_running_queue(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.running_queue == m_asyncio.Queue.return_value - - assert ( - list(m_asyncio.Queue.call_args) - == [(), {}]) - assert "running_queue" in concurrent.__dict__ - - -@pytest.mark.parametrize("cpus", [None, "", 0, 4, 73]) -def test_aio_concurrent_default_limit(patches, cpus): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "min", - "os", - prefix="tools.base.aio") - - with patched as (m_min, m_os): - m_os.cpu_count.return_value = cpus - assert concurrent.default_limit == m_min.return_value - - assert ( - list(m_min.call_args) - == [(32, (cpus or 0) + 4), {}]) - assert "default_limit" not in concurrent.__dict__ - - -def test_aio_concurrent_consumes_async(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "isinstance", - prefix="tools.base.aio") - - with patched as 
(m_inst, ): - assert concurrent.consumes_async == m_inst.return_value - - assert ( - list(m_inst.call_args) - == [(["CORO"], (types.AsyncGeneratorType, AsyncIterator, AsyncIterable)), {}]) - assert "consumes_async" in concurrent.__dict__ - - -def test_aio_concurrent_consumes_generator(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "isinstance", - prefix="tools.base.aio") - - with patched as (m_inst, ): - assert concurrent.consumes_generator == m_inst.return_value - - assert ( - list(m_inst.call_args) - == [(["CORO"], (types.AsyncGeneratorType, types.GeneratorType)), {}]) - assert "consumes_generator" in concurrent.__dict__ - - -@pytest.mark.parametrize("limit", [None, "", 0, -1, 73]) -def test_aio_concurrent_limit(patches, limit): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.default_limit", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - concurrent._limit = limit - - with patched as (m_limit, ): - assert concurrent.limit == (limit or m_limit.return_value) - - if limit: - assert not m_limit.called - - assert "limit" in concurrent.__dict__ - - -@pytest.mark.parametrize("limit", [None, "", 0, -1, 73]) -def test_aio_concurrent_nolimit(patches, limit): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.limit", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_limit, ): - m_limit.return_value = limit - assert concurrent.nolimit == (limit == -1) - - assert "nolimit" in concurrent.__dict__ - - -def test_aio_concurrent_out(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.out == m_asyncio.Queue.return_value - - assert ( - list(m_asyncio.Queue.call_args) - == [(), {}]) - assert "out" in concurrent.__dict__ - - -@pytest.mark.parametrize("empty", [True, False]) -def test_aio_concurrent_running(patches, empty): - concurrent = 
aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.running_queue", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_running_queue, ): - m_running_queue.return_value.empty.return_value = empty - assert concurrent.running == (not empty) - - assert "running" not in concurrent.__dict__ - - -def test_aio_concurrent_sem(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - ("concurrent.limit", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_limit): - assert concurrent.sem == m_asyncio.Semaphore.return_value - - assert ( - list(m_asyncio.Semaphore.call_args) - == [(m_limit.return_value, ), {}]) - assert "sem" in concurrent.__dict__ - - -def test_aio_concurrent_submission_lock(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.submission_lock == m_asyncio.Lock.return_value - - assert ( - list(m_asyncio.Lock.call_args) - == [(), {}]) - assert "submission_lock" in concurrent.__dict__ - - -@pytest.mark.parametrize("locked", [True, False]) -def test_aio_concurrent_submitting(patches, locked): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.submission_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_submission_lock, ): - m_submission_lock.return_value.locked.return_value = locked - assert concurrent.submitting == locked - - assert "submitting" not in concurrent.__dict__ - - -@pytest.mark.asyncio -async def test_aio_concurrent_cancel(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.cancel_tasks", dict(new_callable=AsyncMock)), - ("concurrent.close", dict(new_callable=AsyncMock)), - ("concurrent.close_coros", dict(new_callable=AsyncMock)), - ("concurrent.sem", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - waiter = 
MagicMock() - - class SubmitTask: - def __init__(self): - self.cancel = MagicMock() - - def __await__(self): - waiter() - yield - - concurrent.submit_task = SubmitTask() - - with patched as (m_cancel, m_close, m_coros, m_sem): - assert not await concurrent.cancel() - - assert ( - list(m_close.call_args) - == [(), {}]) - assert ( - list(m_sem.return_value.release.call_args) - == [(), {}]) - assert ( - list(m_cancel.call_args) - == [(), {}]) - assert ( - list(m_coros.call_args) - == [(), {}]) - assert ( - list(waiter.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("bad", range(0, 8)) -async def test_aio_concurrent_cancel_tasks(patches, bad): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.running_tasks", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - tasks = [] - waiter = MagicMock() - - class Task: - def __init__(self, i): - self.i = i - self.cancel = MagicMock() - - def __await__(self): - waiter() - if self.i == bad: - raise BaseException("AN ERROR OCCURRED") - - for i in range(0, 7): - tasks.append(Task(i)) - - with patched as (m_running, ): - m_running.return_value = tasks - assert not await concurrent.cancel_tasks() - - assert ( - list(list(c) for c in waiter.call_args_list) - == [[(), {}]] * 7) - for task in tasks: - assert ( - list(task.cancel.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed", [True, False]) -async def test_aio_concurrent_close(patches, closed): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.closing_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_closed, m_lock): - m_closed.return_value = closed - m_lock.return_value.acquire = AsyncMock() - assert not await concurrent.close() - - if closed: - assert not m_lock.called - else: - assert ( - list(m_lock.return_value.acquire.call_args) - == [(), {}]) - - 
-@pytest.mark.asyncio -@pytest.mark.parametrize("consumes_generator", [True, False]) -@pytest.mark.parametrize("bad", range(0, 8)) -async def test_aio_concurrent_close_coros(patches, consumes_generator, bad): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "concurrent.close", - ("concurrent.iter_coros", dict(new_callable=PropertyMock)), - ("concurrent.consumes_generator", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - coros = [] - for i in range(0, 7): - coro = MagicMock() - if i == bad: - coro.close.side_effect = BaseException("AN ERROR OCCURRED") - coros.append(coro) - - async def iter_coros(): - for coro in coros: - yield coro - - with patched as (m_close, m_iter, m_isgen): - m_isgen.return_value = consumes_generator - m_iter.return_value = iter_coros - assert not await concurrent.close_coros() - - if consumes_generator: - assert not m_iter.called - return - assert ( - list(m_iter.call_args) - == [(), {}]) - for coro in coros: - assert ( - list(coro.close.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -async def test_aio_concurrent_create_task(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - "concurrent.remember_task", - ("concurrent.task", dict(new_callable=MagicMock)), - ("concurrent.running_queue", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_rem, m_task, m_running_queue): - assert not await concurrent.create_task("CORO") - - assert ( - list(m_running_queue.return_value.put_nowait.call_args) - == [(None, ), {}]) - assert ( - list(m_task.call_args) - == [("CORO", ), {}]) - assert ( - list(m_asyncio.create_task.call_args) - == [(m_task.return_value, ), {}]) - assert ( - list(m_rem.call_args) - == [(m_asyncio.create_task.return_value, ), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed", [True, False]) -@pytest.mark.parametrize("active", [True, False]) -async def test_aio_concurrent_exit_on_completion(patches, active, 
closed): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.active", dict(new_callable=PropertyMock)), - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_active, m_closed, m_out): - m_out.return_value.put = AsyncMock() - m_active.return_value = active - m_closed.return_value = closed - assert not await concurrent.exit_on_completion() - - if closed or active: - assert not m_out.called - return - assert ( - list(m_out.return_value.put.call_args) - == [(aio._sentinel, ), {}]) - - -@pytest.mark.parametrize("closed", [True, False]) -def test_aio_concurrent_forget_task(patches, closed): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closed", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - concurrent._running = MagicMock() - - with patched as (m_closed, ): - m_closed.return_value = closed - assert not concurrent.forget_task("TASK") - - if closed: - assert not concurrent._running.remove.called - return - assert ( - list(concurrent._running.remove.call_args) - == [("TASK", ), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [True, False]) -@pytest.mark.parametrize("consumes_async", [True, False]) -async def test_aio_concurrent_iter_coros(patches, raises, consumes_async): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.consumes_async", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - coros = [f"CORO{i}" for i in range(0, 7)] - exception = BaseException("AN RAISES OCCURRED") - - def iter_coros(): - if raises: - raise exception - for coro in coros: - yield coro - - async def async_iter_coros(): - if raises: - raise exception - for coro in coros: - yield coro - - concurrent._coros = ( - async_iter_coros() - if consumes_async - else iter_coros()) - results = [] - - with patched as (m_async, ): - m_async.return_value = consumes_async - - 
async for result in concurrent.iter_coros(): - results.append(result) - - if raises: - error = results[0] - assert isinstance(error, aio.ConcurrentIteratorError) - assert error.args[0] is exception - assert results == [error] - return - assert results == coros - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed", [True, False]) -@pytest.mark.parametrize("nolimit", [True, False]) -@pytest.mark.parametrize("decrement", [None, True, False]) -async def test_aio_concurrent_on_task_complete(patches, closed, nolimit, decrement): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.exit_on_completion", dict(new_callable=AsyncMock)), - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - ("concurrent.running_queue", dict(new_callable=PropertyMock)), - ("concurrent.nolimit", dict(new_callable=PropertyMock)), - ("concurrent.sem", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - kwargs = {} - if decrement is not None: - kwargs["decrement"] = decrement - - with patched as (m_complete, m_closed, m_out, m_running_queue, m_nolimit, m_sem): - m_nolimit.return_value = nolimit - m_closed.return_value = closed - m_out.return_value.put = AsyncMock() - assert not await concurrent.on_task_complete("RESULT", **kwargs) - - if closed: - assert not m_complete.called - assert not m_nolimit.called - assert not m_sem.called - assert not m_running_queue.called - assert not m_out.return_value.put.called - return - - assert ( - list(m_out.return_value.put.call_args) - == [("RESULT", ), {}]) - if nolimit: - assert not m_sem.return_value.release.called - else: - assert ( - list(m_sem.return_value.release.call_args) - == [(), {}]) - if decrement or decrement is None: - assert ( - list(m_running_queue.return_value.get_nowait.call_args) - == [(), {}]) - else: - assert not m_running_queue.return_value.get_nowait.called - assert ( - list(m_complete.call_args) - == [(), {}]) - - -@pytest.mark.asyncio 
-@pytest.mark.parametrize("result_count", range(0, 7)) -@pytest.mark.parametrize("error", [True, False]) -@pytest.mark.parametrize("should_error", [True, False]) -async def test_aio_concurrent_output(patches, result_count, error, should_error): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "concurrent.should_error", - ("concurrent.cancel", dict(new_callable=AsyncMock)), - ("concurrent.close", dict(new_callable=AsyncMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - exception = Exception() - - class DummyQueue: - _running_queue = 0 - - async def get(self): - if result_count == 0: - return aio._sentinel - if result_count > self._running_queue: - self._running_queue += 1 - if error and result_count == self._running_queue: - return exception - return f"RESULT {self._running_queue}" - return aio._sentinel - - def should_error(self, result): - return error and should_error and (result_count == self._running_queue) - - q = DummyQueue() - results = [] - - with patched as (m_error, m_cancel, m_close, m_out): - m_out.return_value.get.side_effect = q.get - m_error.side_effect = q.should_error - if result_count and error and should_error: - with pytest.raises(Exception): - async for result in concurrent.output(): - results.append(result) - else: - async for result in concurrent.output(): - results.append(result) - - if result_count and error and should_error: - # last one errored - assert results == [f"RESULT {i}" for i in range(1, result_count)] - assert ( - list(list(c) for c in m_error.call_args_list) - == [[(result,), {}] for result in results] + [[(exception,), {}]]) - assert ( - list(m_cancel.call_args) - == [(), {}]) - assert not m_close.called - return - - assert ( - list(list(c) for c in m_close.call_args_list) - == [[(), {}]]) - assert not m_cancel.called - - if not result_count: - assert results == [] - return - - if error: - assert ( - results - == [f"RESULT {i}" for i in range(1, result_count)] + 
[exception]) - return - # all results returned correctly - assert results == [f"RESULT {i}" for i in range(1, result_count + 1)] - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed_before", [True, False]) -@pytest.mark.parametrize("closed_after", [True, False]) -@pytest.mark.parametrize("nolimit", [True, False]) -async def test_aio_concurrent_ready(patches, closed_before, closed_after, nolimit): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.nolimit", dict(new_callable=PropertyMock)), - ("concurrent.sem", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - class DummyCloser: - order_mock = MagicMock() - close_calls = 0 - - async def _acquire(self): - self.order_mock("ACQUIRE") - - def _nolimit(self): - self.order_mock("NOLIMIT") - return nolimit - - def _closed(self): - self.order_mock("CLOSED") - self.close_calls += 1 - if self.close_calls == 1: - return closed_before - if self.close_calls == 2: - return closed_after - - closer = DummyCloser() - - with patched as (m_closed, m_nolimit, m_sem): - m_nolimit.side_effect = closer._nolimit - m_closed.side_effect = closer._closed - m_sem.return_value.acquire = closer._acquire - assert ( - await concurrent.ready() - == ((not closed_before and not closed_after) - if not nolimit else not closed_before)) - - if closed_before: - assert not m_nolimit.called - assert not m_sem.called - assert ( - list(list(c) for c in closer.order_mock.call_args_list) - == [[('CLOSED',), {}]]) - return - if nolimit: - assert not m_sem.called - assert ( - list(list(c) for c in closer.order_mock.call_args_list) - == [[('CLOSED',), {}], - [('NOLIMIT',), {}]]) - return - assert ( - list(list(c) for c in closer.order_mock.call_args_list) - == [[('CLOSED',), {}], - [('NOLIMIT',), {}], - [('ACQUIRE',), {}], - [('CLOSED',), {}]]) - - -def test_aio_concurrent_remember_task(): - concurrent = aio.concurrent(["CORO"]) - concurrent._running = 
MagicMock() - task = MagicMock() - assert not concurrent.remember_task(task) - assert ( - list(concurrent._running.append.call_args) - == [(task, ), {}]) - assert ( - list(task.add_done_callback.call_args) - == [(concurrent.forget_task, ), {}]) - - -@pytest.mark.parametrize("result", [None, "RESULT", aio.ConcurrentError, aio.ConcurrentExecutionError, aio.ConcurrentIteratorError]) -@pytest.mark.parametrize("yield_exceptions", [True, False]) -def test_aio_concurrent_should_error(result, yield_exceptions): - concurrent = aio.concurrent(["CORO"]) - concurrent.yield_exceptions = yield_exceptions - - if isinstance(result, type) and issubclass(result, BaseException): - result = result() - - assert ( - concurrent.should_error(result) - == ((isinstance(result, aio.ConcurrentIteratorError) - or isinstance(result, aio.ConcurrentError) and not yield_exceptions))) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("coros", range(0, 7)) -@pytest.mark.parametrize("unready", range(0, 8)) -@pytest.mark.parametrize("valid_raises", [None, Exception, aio.ConcurrentError]) -@pytest.mark.parametrize("iter_errors", [True, False]) -async def test_aio_concurrent_submit(patches, coros, unready, valid_raises, iter_errors): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "isinstance", - "concurrent.validate_coro", - ("concurrent.exit_on_completion", dict(new_callable=AsyncMock)), - ("concurrent.create_task", dict(new_callable=AsyncMock)), - ("concurrent.on_task_complete", dict(new_callable=AsyncMock)), - ("concurrent.ready", dict(new_callable=AsyncMock)), - ("concurrent.coros", dict(new_callable=PropertyMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - ("concurrent.submission_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - m_order = MagicMock() - - class DummyReady: - counter = 0 - - def ready(self): - if self.counter >= unready: - self.counter += 1 - return False - self.counter += 1 - return True - - ready = DummyReady() - - async def 
acquire(): - m_order("ACQUIRE") - - def release(): - m_order("RELEASE") - - corolist = [MagicMock() for coro in range(1, coros)] - - async def iter_coros(): - for coro in corolist: - m_order(coro) - yield coro - - valid_errors = ( - (valid_raises == Exception) - and coros > 1 - and not unready == 0 - and not iter_errors) - - with patched as (m_inst, m_valid, m_exit, m_create, m_complete, m_ready, m_coros, m_out, m_lock): - m_out.return_value.put = AsyncMock() - m_inst.return_value = iter_errors - m_valid.side_effect = valid_raises - m_ready.side_effect = ready.ready - m_coros.return_value = iter_coros() - m_lock.return_value.acquire.side_effect = acquire - m_lock.return_value.release.side_effect = release - - if valid_errors: - with pytest.raises(Exception): - await concurrent.submit() - else: - assert not await concurrent.submit() - - if valid_errors: - assert not m_lock.return_value.called - assert not m_exit.called - else: - assert ( - list(m_lock.return_value.release.call_args) - == [(), {}]) - assert ( - list(m_exit.call_args) - == [(), {}]) - - if coros < 2: - assert not m_valid.called - assert not m_inst.called - assert not m_complete.called - assert not m_create.called - assert not m_ready.called - assert not m_out.return_value.put.called - return - - should_close_coro = ( - not iter_errors - and not valid_errors - and (len(corolist) > unready)) - - if should_close_coro: - assert corolist[unready].close.called - else: - assert not any(coro.close.called for coro in corolist) - - if iter_errors: - assert ( - list(list(c) for c in m_out.return_value.put.call_args_list) - == [[(corolist[0], ), {}]]) - assert ( - list(list(c) for c in m_inst.call_args_list) - == [[(corolist[0], aio.ConcurrentIteratorError), {}]]) - assert not m_ready.called - assert not m_valid.called - assert not m_complete.called - assert not m_create.called - return - - if valid_errors: - assert ( - list(list(c) for c in m_inst.call_args_list) - == [[(corolist[0], 
aio.ConcurrentIteratorError), {}]]) - assert ( - list(list(c) for c in m_ready.call_args_list) - == [[(), {}]]) - assert ( - list(list(c) for c in m_valid.call_args_list) - == [[(corolist[0], ), {}]]) - assert not m_complete.called - assert not m_create.called - assert ( - list(list(c) for c in m_order.call_args_list) - == ([[('ACQUIRE',), {}], - [(corolist[0],), {}]])) - return - - assert not m_out.return_value.put.called - assert ( - list(list(c) for c in m_ready.call_args_list) - == [[(), {}]] * min(coros - 1, unready + 1 or 1)) - assert ( - list(list(c) for c in m_valid.call_args_list) - == [[(corolist[i - 1], ), {}] for i in range(1, min(coros, unready + 1))]) - assert ( - list(list(c) for c in m_order.call_args_list) - == ([[('ACQUIRE',), {}]] - + [[(corolist[i - 1],), {}] for i in range(1, min(coros, unready + 2))] - + [[('RELEASE',), {}]])) - if valid_raises: - assert len(m_complete.call_args_list) == max(min(coros - 1, unready), 0) - for c in m_complete.call_args_list: - error = list(c)[0][0] - assert isinstance(error, aio.ConcurrentError) - assert ( - list(c) - == [(error,), {'decrement': False}]) - assert not m_create.called - return - assert not m_complete.called - assert ( - list(list(c) for c in m_create.call_args_list) - == [[(corolist[i - 1],), {}] for i in range(1, min(coros, unready + 1))]) - - -class OtherException(BaseException): - pass - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [None, Exception, OtherException]) -async def test_aio_concurrent_task(patches, raises): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "concurrent.on_task_complete", - prefix="tools.base.aio") - - if raises: - exception = raises("AN ERROR OCCURRED") - - async def coro(): - if raises: - raise exception - return 23 - - with patched as (m_complete, ): - assert not await concurrent.task(coro()) - - result = m_complete.call_args[0][0] - - if not raises: - assert result == 23 - else: - assert isinstance(result, aio.ConcurrentExecutionError) 
- assert result.args[0] is exception - assert ( - list(m_complete.call_args) - == [(result, ), {}]) - - -@pytest.mark.parametrize("awaitable", [True, False]) -@pytest.mark.parametrize( - "state", - [inspect.CORO_CLOSED, - inspect.CORO_CREATED, - inspect.CORO_RUNNING, - inspect.CORO_SUSPENDED]) -def test_aio_concurrent_validate_coro(patches, awaitable, state): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "inspect.getcoroutinestate", - prefix="tools.base.aio") - - # we cant patch inspect.isawaitable without fooing unittest - def unawaitable(): - pass - - async def coro(): - pass - - awaits = ( - coro() - if awaitable - else unawaitable) - - with patched as (m_state, ): - m_state.return_value = state - - if awaitable and state == inspect.CORO_CREATED: - assert not concurrent.validate_coro(awaits) - else: - with pytest.raises(aio.ConcurrentError) as e: - concurrent.validate_coro(awaits) - - if not awaitable: - assert ( - e.value.args[0] - == f'Provided input was not a coroutine: {awaits}') - assert not m_state.called - return - - awaits.close() - assert ( - list(m_state.call_args) - == [(awaits, ), {}]) - - if state != inspect.CORO_CREATED: - assert ( - e.value.args[0] - == f'Provided coroutine has already been fired: {awaits}') - - -async def aiter(items): - for item in items: - yield item - - -@pytest.mark.asyncio -@pytest.mark.parametrize("limit", list(range(0, 4)) + [-1]) -@pytest.mark.parametrize("yield_exceptions", [None, True, False]) -@pytest.mark.parametrize("iter_type", [list, tuple, set, iter, aiter]) -@pytest.mark.parametrize( - "coros", - [["HAPPY"], - ["HAPPY"] * 2 + ["SAD"] + ["HAPPY"] * 3, - ["HAPPY"] * 7, - ["HAPPY"] * 2 + ["RAISE"] + ["HAPPY"] * 3, - ["SAD"] * 2 + ["HAPPY"] * 3, - ["HAPPY"] * 2 + ["CABBAGE"] + ["HAPPY"] * 3, - ["HAPPY"] * 2 + ["FIRED"] + ["HAPPY"] * 3]) -async def test_aio_concurrent_integration(limit, yield_exceptions, iter_type, coros): - # This is an integration/black-box test that only measures inputs/outputs and 
the - # effect of using the utility with them on them - - # `HAPPY` - a happy coroutine ready to be fired - # `SAD` - a sad coroutine that will raise a `SadError` when fired - # `FIRED` - a coroutine that has already been fired - # `RAISE` - raise an error in the iterator - # `CABBAGE` - leafy vegetable of the brassica family - - tasks_at_the_beginning = len(asyncio.all_tasks()) - - kwargs = {} - - if yield_exceptions is not None: - kwargs["yield_exceptions"] = yield_exceptions - - if limit: - kwargs["limit"] = limit - - class SadError(Exception): - pass - - class LoopError(Exception): - pass - - async def happy(): - # this makes happy return after sad (ie errors) and tests the ordering of responses - # and the handling of pending tasks when errors occur - await asyncio.sleep(.01) - return "HAPPY" - - fired = happy() - await fired - - async def sad(): - raise SadError - - def coro_gen(): - for coro in coros: - if coro == "RAISE": - raise LoopError() - if coro == "HAPPY": - yield happy() - elif coro == "SAD": - yield sad() - elif coro == "FIRED": - yield fired - else: - yield coro - - all_good = all(coro == "HAPPY" for coro in coros) - iter_raises = any(coro == "RAISE" for coro in coros) - - if iter_raises: - # we can only test the generator types for errors - # during iteration - ie if `list`, `tuple` etc contain - # errors, they would raise now. 
- if not iter_type in [iter, aiter]: - return - generated_coros = coro_gen() - else: - generated_coros = list(coro_gen()) - expected_err_index = next((i for i, x in enumerate(coros) if x != 'HAPPY'), None) - - results = [] - concurrent = aio.concurrent(iter_type(generated_coros), **kwargs) - - if (not all_good and not yield_exceptions) or iter_raises: - if iter_raises: - with pytest.raises(aio.ConcurrentIteratorError) as e: - async for result in concurrent: - results.append(result) - assert isinstance(e.value.args[0], LoopError) - return - else: - coro_fail = ( - any(not inspect.isawaitable(coro) for coro in generated_coros) - or any(coro == "FIRED" for coro in coros)) - if coro_fail: - with pytest.raises(aio.ConcurrentError): - async for result in concurrent: - results.append(result) - else: - with pytest.raises(aio.ConcurrentExecutionError): - async for result in concurrent: - results.append(result) - - # for iterators there is no way of knowing that more awaitables were - # on the way when failure happened, so these need to be closed here - if iter_type in (iter, aiter): - for coro in generated_coros[expected_err_index:]: - if not isinstance(coro, str): - coro.close() - - if limit < 1 and iter_type != set: - # as all jobs are submitted concurrently (the default is higher than - # tne number of test jobs, and -1 forces no limit) and as sad is - # faster than happy, we get no results - assert results == [] - elif iter_type != set: - # because the ordering on sets is indeterminate the results are unpredictable - # therefore the easiest thing is to just exclude them from this test - assert results == coros[:expected_err_index - (expected_err_index % limit)] - - # this can probs be removed, i think it was caused by unhandled GeneratorExit - await asyncio.sleep(.001) - gc.collect() - assert len(asyncio.all_tasks()) == tasks_at_the_beginning - return - - async for result in concurrent: - results.append(result) - - assert len(asyncio.all_tasks()) == 
tasks_at_the_beginning - - def mangled_results(): - # replace the errors with the test strings - for result in results: - if isinstance(result, aio.ConcurrentExecutionError): - yield "SAD" - elif isinstance(result, aio.ConcurrentError): - if "CABBAGE" in result.args[0]: - yield "CABBAGE" - else: - yield "FIRED" - else: - yield result - - if expected_err_index: - err_index = ( - expected_err_index - if limit == 0 - else expected_err_index - (expected_err_index % limit)) - - if expected_err_index and err_index >= limit and limit not in [0, -1]: - # the error is at the beginning of whichever batch its in - expected = ["HAPPY"] * 6 - expected[err_index] = coros[err_index] - else: - # the error is in the first batch so its at the beginning - expected = [x for x in list(coros) if x != "HAPPY"] + [x for x in list(coros) if x == "HAPPY"] - - if iter_type == set: - assert set(expected) == set(mangled_results()) - else: - assert expected == list(mangled_results()) diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py deleted file mode 100644 index e3c7d3c155e8c..0000000000000 --- a/tools/base/tests/test_checker.py +++ /dev/null @@ -1,1017 +0,0 @@ -import logging -from unittest.mock import MagicMock, patch, PropertyMock - -import pytest - -from tools.base.checker import ( - AsyncChecker, BaseChecker, BazelChecker, Checker, CheckerSummary) -from tools.base.runner import BazelRunner - - -class DummyChecker(Checker): - - def __init__(self): - self.args = PropertyMock() - - -class DummyBazelChecker(BazelChecker): - - def __init__(self): - self.args = PropertyMock() - - -class DummyCheckerWithChecks(Checker): - checks = ("check1", "check2") - - def __init__(self, *args): - self.check1 = MagicMock() - self.check2 = MagicMock() - - def check_check1(self): - self.check1() - - def check_check2(self): - self.check2() - - -def test_checker_constructor(): - super_mock = patch("tools.base.checker.runner.Runner.__init__") - - with super_mock as m_super: - checker 
= Checker("path1", "path2", "path3") - - assert ( - list(m_super.call_args) - == [('path1', 'path2', 'path3'), {}]) - assert checker.summary_class == CheckerSummary - - assert checker.active_check == "" - assert "active_check" not in checker.__dict__ - - -def test_checker_diff(): - checker = Checker("path1", "path2", "path3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - assert checker.diff == m_args.return_value.diff - assert "diff" not in checker.__dict__ - - -@pytest.mark.parametrize( - "errors", - [{}, dict(exiting="EEK"), dict(notexiting="OK")]) -def test_checker_exiting(errors): - checker = Checker("path1", "path2", "path3") - checker.errors = errors - assert checker.exiting == bool("exiting" in errors) - assert "exiting" not in checker.__dict__ - - -def test_checker_error_count(): - checker = Checker("path1", "path2", "path3") - checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) - assert checker.error_count == 15 - assert "error_count" not in checker.__dict__ - - -def test_checker_failed(): - checker = Checker("path1", "path2", "path3") - checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) - assert checker.failed == {'foo': 3, 'bar': 5, 'baz': 7} - assert "failed" not in checker.__dict__ - - -def test_checker_fix(): - checker = Checker("path1", "path2", "path3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - assert checker.fix == m_args.return_value.fix - assert "fix" not in checker.__dict__ - - -@pytest.mark.parametrize("failed", [True, False]) -@pytest.mark.parametrize("warned", [True, False]) -def test_checker_has_failed(patches, failed, warned): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.failed", dict(new_callable=PropertyMock)), - ("Checker.warned", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as 
(m_failed, m_warned): - m_failed.return_value = failed - m_warned.return_value = warned - result = checker.has_failed - - if failed or warned: - assert result is True - else: - assert result is False - assert "has_failed" not in checker.__dict__ - - -@pytest.mark.parametrize("path", [None, "PATH"]) -@pytest.mark.parametrize("paths", [[], ["PATH0"]]) -@pytest.mark.parametrize("isdir", [True, False]) -def test_checker_path(patches, path, paths, isdir): - class DummyError(Exception): - pass - checker = Checker("path1", "path2", "path3") - patched = patches( - "pathlib", - ("Checker.args", dict(new_callable=PropertyMock)), - ("Checker.parser", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_plib, m_args, m_parser): - m_parser.return_value.error = DummyError - m_args.return_value.path = path - m_args.return_value.paths = paths - m_plib.Path.return_value.is_dir.return_value = isdir - if not path and not paths: - with pytest.raises(DummyError) as e: - checker.path - assert ( - e.value.args - == ('Missing path: `path` must be set either as an arg or with --path',)) - elif not isdir: - with pytest.raises(DummyError) as e: - checker.path - assert ( - e.value.args - == ('Incorrect path: `path` must be a directory, set either as first arg or with --path',)) - else: - assert checker.path == m_plib.Path.return_value - assert ( - list(m_plib.Path.call_args) - == [(path or paths[0],), {}]) - assert "path" in checker.__dict__ - if path or paths: - assert ( - list(m_plib.Path.return_value.is_dir.call_args) - == [(), {}]) - - -@pytest.mark.parametrize("paths", [[], ["path1", "path2"]]) -def test_checker_paths(patches, paths): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.args", dict(new_callable=PropertyMock)), - ("Checker.path", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_args, m_path): - m_args.return_value.paths = paths - result = checker.paths - - if paths: - 
assert result == paths - else: - assert result == [m_path.return_value] - assert "paths" not in checker.__dict__ - - -@pytest.mark.parametrize("summary", [True, False]) -@pytest.mark.parametrize("error_count", [0, 1]) -@pytest.mark.parametrize("warning_count", [0, 1]) -@pytest.mark.parametrize("exiting", [True, False]) -def test_checker_show_summary(patches, summary, error_count, warning_count, exiting): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.args", dict(new_callable=PropertyMock)), - ("Checker.exiting", dict(new_callable=PropertyMock)), - ("Checker.error_count", dict(new_callable=PropertyMock)), - ("Checker.warning_count", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_args, m_exit, m_errors, m_warnings): - m_args.return_value.summary = summary - m_errors.return_value = error_count - m_warnings.return_value = warning_count - m_exit.return_value = exiting - result = checker.show_summary - - if exiting: - assert result is False - elif summary or error_count or warning_count: - assert result is True - else: - assert result is False - assert "show_summary" not in checker.__dict__ - - -def test_checker_status(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.success_count", dict(new_callable=PropertyMock)), - ("Checker.error_count", dict(new_callable=PropertyMock)), - ("Checker.warning_count", dict(new_callable=PropertyMock)), - ("Checker.failed", dict(new_callable=PropertyMock)), - ("Checker.warned", dict(new_callable=PropertyMock)), - ("Checker.succeeded", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as args: - (m_success_count, m_error_count, m_warning_count, - m_failed, m_warned, m_succeeded) = args - assert ( - checker.status - == dict( - success=m_success_count.return_value, - errors=m_error_count.return_value, - warnings=m_warning_count.return_value, - failed=m_failed.return_value, - 
warned=m_warned.return_value, - succeeded=m_succeeded.return_value)) - assert "status" not in checker.__dict__ - - -def test_checker_succeeded(): - checker = Checker("path1", "path2", "path3") - checker.success = dict( - foo=["check"] * 3, - bar=["check"] * 5, - baz=["check"] * 7) - assert ( - checker.succeeded - == dict(foo=3, bar=5, baz=7)) - assert "succeeded" not in checker.__dict__ - - -def test_checker_success_count(): - checker = Checker("path1", "path2", "path3") - checker.success = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) - assert checker.success_count == 15 - assert "success_count" not in checker.__dict__ - - -def test_checker_summary(): - checker = Checker("path1", "path2", "path3") - summary_mock = patch( - "tools.base.checker.Checker.summary_class", - new_callable=PropertyMock) - - with summary_mock as m_summary: - assert checker.summary == m_summary.return_value.return_value - - assert ( - list(m_summary.return_value.call_args) - == [(checker,), {}]) - assert "summary" in checker.__dict__ - - -def test_checker_warned(): - checker = Checker("path1", "path2", "path3") - checker.warnings = dict( - foo=["check"] * 3, - bar=["check"] * 5, - baz=["check"] * 7) - assert ( - checker.warned - == dict(foo=3, bar=5, baz=7)) - assert "warned" not in checker.__dict__ - - -def test_checker_warning_count(): - checker = Checker("path1", "path2", "path3") - checker.warnings = dict(foo=["warn"] * 3, bar=["warn"] * 5, baz=["warn"] * 7) - assert checker.warning_count == 15 - assert "warning_count" not in checker.__dict__ - - -def test_checker_add_arguments(patches): - checker = DummyCheckerWithChecks("path1", "path2", "path3") - parser = MagicMock() - patched = patches( - "runner.Runner.add_arguments", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert checker.add_arguments(parser) is None - - assert ( - list(m_super.call_args) - == [(parser,), {}]) - - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == 
[[('--fix',), - {'action': 'store_true', - 'default': False, - 'help': 'Attempt to fix in place'}], - [('--diff',), - {'action': 'store_true', - 'default': False, - 'help': 'Display a diff in the console where available'}], - [('--warning', '-w'), - {'choices': ['warn', 'error'], - 'default': 'warn', - 'help': 'Handle warnings as warnings or errors'}], - [('--summary',), - {'action': 'store_true', - 'default': False, - 'help': 'Show a summary of check runs'}], - [('--summary-errors',), - {'type': int, - 'default': 5, - 'help': 'Number of errors to show in the summary, -1 shows all'}], - [('--summary-warnings',), - {'type': int, - 'default': 5, - 'help': 'Number of warnings to show in the summary, -1 shows all'}], - [('--check', '-c'), - {'choices': ("check1", "check2"), - 'nargs': '*', - 'help': 'Specify which checks to run, can be specified for multiple checks'}], - [('--config-check1',), - {'default': '', - 'help': 'Custom configuration for the check1 check'}], - [('--config-check2',), - {'default': '', - 'help': 'Custom configuration for the check2 check'}], - [('--path', '-p'), - {'default': None, - 'help': 'Path to the test root (usually Envoy source dir). If not specified the first path of paths is used'}], - [('paths',), - {'nargs': '*', - 'help': 'Paths to check. 
At least one path must be specified, or the `path` argument should be provided'}]]) - - -TEST_ERRORS: tuple = ( - {}, - dict(myerror=[]), - dict(myerror=["a", "b", "c"]), - dict(othererror=["other1", "other2", "other3"]), - dict(othererror=["other1", "other2", "other3"], myerror=["a", "b", "c"])) - - -@pytest.mark.parametrize("log", [True, False]) -@pytest.mark.parametrize("log_type", [None, "fatal"]) -@pytest.mark.parametrize("errors", TEST_ERRORS) -@pytest.mark.parametrize("newerrors", [[], ["err1", "err2", "err3"]]) -def test_checker_error(log, log_type, errors, newerrors): - checker = Checker("path1", "path2", "path3") - log_mock = patch( - "tools.base.checker.Checker.log", - new_callable=PropertyMock) - checker.errors = errors.copy() - result = 1 if newerrors else 0 - - with log_mock as m_log: - if log_type: - assert checker.error("mycheck", newerrors, log, log_type=log_type) == result - else: - assert checker.error("mycheck", newerrors, log) == result - - if not newerrors: - assert not m_log.called - assert "mycheck" not in checker.errors - return - - assert checker.errors["mycheck"] == errors.get("mycheck", []) + newerrors - for k, v in errors.items(): - if k != "mycheck": - assert checker.errors[k] == v - if log: - assert ( - list(list(c) for c in getattr(m_log.return_value, log_type or "error").call_args_list) - == [[(f'[mycheck] err{i}',), {}] for i in range(1, 4)]) - else: - assert not getattr(m_log.return_value, log_type or "error").called - - -def test_checker_exit(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - "Checker.error", - ("Checker.log", dict(new_callable=PropertyMock)), - ("Checker.stdout", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_error, m_log, m_stdout): - assert checker.exit() == m_error.return_value - - assert ( - list(m_log.return_value.handlers.__getitem__.call_args) - == [(0,), {}]) - assert ( - 
list(m_log.return_value.handlers.__getitem__.return_value.setLevel.call_args) - == [(logging.FATAL,), {}]) - assert ( - list(m_stdout.return_value.handlers.__getitem__.call_args) - == [(0,), {}]) - assert ( - list(m_stdout.return_value.handlers.__getitem__.return_value.setLevel.call_args) - == [(logging.FATAL,), {}]) - assert ( - list(m_error.call_args) - == [('exiting', ['Keyboard exit']), {'log_type': 'fatal'}]) - - -TEST_CHECKS: tuple = ( - None, - (), - ("check1", ), - ("check1", "check2", "check3"), - ("check3", "check4", "check5"), - ("check4", "check5")) - - -@pytest.mark.parametrize("checks", TEST_CHECKS) -def test_checker_get_checks(checks): - checker = Checker("path1", "path2", "path3") - checker.checks = ("check1", "check2", "check3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - m_args.return_value.check = checks - if checks: - assert ( - checker.get_checks() - == [check for check in checker.checks if check in checks or []]) - else: - assert checker.get_checks() == checker.checks - - -def test_checker_on_check_begin(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.log", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_log, ): - assert not checker.on_check_begin("checkname") - - assert checker.active_check == "checkname" - assert ( - list(m_log.return_value.notice.call_args) - == [('[checkname] Running check',), {}]) - - -@pytest.mark.parametrize("errors", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) -@pytest.mark.parametrize("warnings", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) -@pytest.mark.parametrize("exiting", [True, False]) -def test_checker_on_check_run(patches, errors, warnings, exiting): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.exiting", dict(new_callable=PropertyMock)), - ("Checker.log", dict(new_callable=PropertyMock)), - 
prefix="tools.base.checker") - - check = "CHECK1" - checker.errors = errors - checker.warnings = warnings - checker._active_check = check - - with patched as (m_exit, m_log): - m_exit.return_value = exiting - assert not checker.on_check_run(check) - - assert checker.active_check == "" - - if exiting: - assert not m_log.called - return - - if check in errors: - assert ( - list(m_log.return_value.error.call_args) - == [('[CHECK1] Check failed',), {}]) - assert not m_log.return_value.warning.called - assert not m_log.return_value.success.called - return - - if check in warnings: - assert ( - list(m_log.return_value.warning.call_args) - == [('[CHECK1] Check has warnings',), {}]) - assert not m_log.return_value.error.called - assert not m_log.return_value.info.called - return - - assert ( - list(m_log.return_value.success.call_args) - == [(f'[{check}] Check completed successfully',), {}]) - assert not m_log.return_value.warning.called - assert not m_log.return_value.error.called - - -def test_checker_on_checks_begin(): - checker = Checker("path1", "path2", "path3") - assert checker.on_checks_begin() is None - - -@pytest.mark.parametrize("failed", [True, False]) -@pytest.mark.parametrize("show_summary", [True, False]) -def test_checker_on_checks_complete(patches, failed, show_summary): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.has_failed", dict(new_callable=PropertyMock)), - ("Checker.show_summary", dict(new_callable=PropertyMock)), - ("Checker.summary", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_failed, m_show_summary, m_summary): - m_failed.return_value = failed - m_show_summary.return_value = show_summary - assert checker.on_checks_complete() is (1 if failed else 0) - - if show_summary: - assert ( - list(m_summary.return_value.print_summary.call_args) - == [(), {}]) - else: - assert not m_summary.return_value.print_summary.called - - -@pytest.mark.parametrize("raises", [None, 
KeyboardInterrupt, Exception]) -def test_checker_run(patches, raises): - checker = DummyCheckerWithChecks("path1", "path2", "path3") - patched = patches( - "Checker.exit", - "Checker.get_checks", - "Checker.on_check_begin", - "Checker.on_check_run", - "Checker.on_checks_begin", - "Checker.on_checks_complete", - ("Checker.log", dict(new_callable=PropertyMock)), - ("Checker.name", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_exit, m_get, m_check, m_run, m_begin, m_complete, m_log, m_name): - m_get.return_value = ("check1", "check2") - - if raises: - m_begin.side_effect = raises() - - if raises == KeyboardInterrupt: - result = checker.run() - - else: - with pytest.raises(raises): - checker.run() - else: - assert checker.run() == m_complete.return_value - - assert ( - list(m_begin.call_args) - == [(), {}]) - assert ( - list(m_complete.call_args) - == [(), {}]) - - if raises == KeyboardInterrupt: - assert ( - list(m_exit.call_args) - == [(), {}]) - return - - assert not m_exit.called - - if raises: - return - - assert ( - list(m_get.call_args) - == [(), {}]) - assert ( - list(list(c) for c in m_check.call_args_list) - == [[(f'check{i}',), {}] for i in range(1, 3)]) - assert ( - list(list(c) for c in m_run.call_args_list) - == [[(f'check{i}',), {}] for i in range(1, 3)]) - assert ( - list(checker.check1.call_args) - == [(), {}]) - assert ( - list(checker.check2.call_args) - == [(), {}]) - - -TEST_WARNS: tuple = ( - {}, - dict(mywarn=[]), - dict(mywarn=["a", "b", "c"]), - dict(otherwarn=["other1", "other2", "other3"]), - dict(otherwarn=["other1", "other2", "other3"], mywarn=["a", "b", "c"])) - - -@pytest.mark.parametrize("log", [True, False]) -@pytest.mark.parametrize("warns", TEST_WARNS) -def test_checker_warn(patches, log, warns): - checker = Checker("path1", "path2", "path3") - log_mock = patch( - "tools.base.checker.Checker.log", - new_callable=PropertyMock) - checker.warnings = warns.copy() - - with log_mock as m_log: - 
checker.warn("mycheck", ["warn1", "warn2", "warn3"], log) - - assert checker.warnings["mycheck"] == warns.get("mycheck", []) + ["warn1", "warn2", "warn3"] - for k, v in warns.items(): - if k != "mycheck": - assert checker.warnings[k] == v - if log: - assert ( - list(list(c) for c in m_log.return_value.warning.call_args_list) - == [[(f'[mycheck] warn{i}',), {}] for i in range(1, 4)]) - else: - assert not m_log.return_value.warn.called - - -TEST_SUCCESS: tuple = ( - {}, - dict(mysuccess=[]), - dict(mysuccess=["a", "b", "c"]), - dict(othersuccess=["other1", "other2", "other3"]), - dict(othersuccess=["other1", "other2", "other3"], mysuccess=["a", "b", "c"])) - - -@pytest.mark.parametrize("log", [True, False]) -@pytest.mark.parametrize("success", TEST_SUCCESS) -def test_checker_succeed(patches, log, success): - checker = Checker("path1", "path2", "path3") - log_mock = patch( - "tools.base.checker.Checker.log", - new_callable=PropertyMock) - checker.success = success.copy() - - with log_mock as m_log: - checker.succeed("mycheck", ["success1", "success2", "success3"], log) - - assert checker.success["mycheck"] == success.get("mycheck", []) + ["success1", "success2", "success3"] - for k, v in success.items(): - if k != "mycheck": - assert checker.success[k] == v - if log: - assert ( - list(list(c) for c in m_log.return_value.success.call_args_list) - == [[(f'[mycheck] success{i}',), {}] for i in range(1, 4)]) - else: - assert not m_log.return_value.success.called - - -# CheckerSummary tests - -def test_checker_summary_constructor(): - checker = DummyChecker() - summary = CheckerSummary(checker) - assert summary.checker == checker - - -@pytest.mark.parametrize("max_errors", [-1, 0, 1, 23]) -def test_checker_summary_max_errors(max_errors): - checker = DummyChecker() - summary = CheckerSummary(checker) - checker.args.summary_errors = max_errors - assert summary.max_errors == max_errors - - -@pytest.mark.parametrize("max_warnings", [-1, 0, 1, 23]) -def 
test_checker_summary_max_warnings(max_warnings): - checker = DummyChecker() - summary = CheckerSummary(checker) - checker.args.summary_warnings = max_warnings - assert summary.max_warnings == max_warnings - - -def test_checker_summary_print_summary(patches): - checker = DummyChecker() - summary = CheckerSummary(checker) - patched = patches( - "CheckerSummary.print_failed", - "CheckerSummary.print_status", - prefix="tools.base.checker") - - with patched as (m_failed, m_status): - summary.print_summary() - assert ( - list(list(c) for c in m_failed.call_args_list) - == [[('warnings',), {}], [('errors',), {}]]) - assert m_status.called - - -TEST_SECTIONS: tuple = ( - ("MSG1", ["a", "b", "c"]), - ("MSG2", []), - ("MSG3", None)) - - -@pytest.mark.parametrize("section", TEST_SECTIONS) -def test_checker_summary_section(section): - checker = DummyChecker() - summary = CheckerSummary(checker) - message, lines = section - expected = [ - "Summary", - "-" * 80, - f"{message}"] - if lines: - expected += lines - assert summary._section(message, lines) == expected - - -@pytest.mark.parametrize("errors", (True, False)) -@pytest.mark.parametrize("warnings", (True, False)) -def test_checker_summary_print_status(patches, errors, warnings): - checker = DummyChecker() - summary = CheckerSummary(checker) - summary.checker = MagicMock() - summary.checker.errors = errors - summary.checker.warnings = warnings - - assert not summary.print_status() - - if errors: - assert ( - list(summary.checker.log.error.call_args) - == [(f"{summary.checker.status}",), {}]) - assert not summary.checker.log.warning.called - assert not summary.checker.log.info.called - return - - if warnings: - assert ( - list(summary.checker.log.warning.call_args) - == [(f"{summary.checker.status}",), {}]) - assert not summary.checker.log.error.called - assert not summary.checker.log.info.called - return - - assert ( - list(summary.checker.log.info.call_args) - == [(f"{summary.checker.status}",), {}]) - assert not 
summary.checker.log.error.called - assert not summary.checker.log.warning.called - - -@pytest.mark.parametrize("problem_type", ("errors", "warnings")) -@pytest.mark.parametrize("max_display", (-1, 0, 1, 23)) -@pytest.mark.parametrize("problems", ({}, dict(foo=["problem1"]), dict(foo=["problem1", "problem2"], bar=["problem3", "problem4"]))) -def test_checker_summary_print_failed(patches, problem_type, max_display, problems): - checker = DummyChecker() - summary = CheckerSummary(checker) - patched = patches( - "CheckerSummary._section", - (f"CheckerSummary.max_{problem_type}", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_section, m_max): - summary.checker = MagicMock() - setattr(summary.checker, f"{problem_type}", problems) - m_max.return_value = max_display - m_section.return_value = ["A", "B", "C"] - summary.print_failed(problem_type) - - if not problems: - assert not summary.checker.log.error.called - assert not m_section.called - return - - output = ( - summary.checker.log.warning if problem_type == "warnings" else summary.checker.log.error) - - assert ( - list(output.call_args) - == [("".join(['A\nB\nC\n'] * len(problems)),), {}]) - - if max_display == 0: - expected = [ - [(f"{summary.checker.name} {prob}", []), {}] - for prob in problems] - else: - def _problems(prob): - return ( - problems[prob][:max_display] - if max_display > 0 - else problems[prob]) - def _extra(prob): - return ( - f": (showing first {max_display} of {len(problems)})" - if len(problems[prob]) > max_display and max_display >= 0 - else (":" - if max_display != 0 - else "")) - expected = [ - [(f"{summary.checker.name} {prob}{_extra(prob)}", _problems(prob)), {}] - for prob in problems] - assert ( - list(list(c) for c in m_section.call_args_list) - == expected) - - -# BazelChecker test - -def test_bazelchecker_constructor(): - checker = DummyBazelChecker() - assert isinstance(checker, BazelRunner) - assert isinstance(checker, Checker) - - -# 
AsyncChecker tests - -def test_asynchecker_constructor(): - checker = AsyncChecker() - assert isinstance(checker, BaseChecker) - - -@pytest.mark.parametrize("raises", [None, KeyboardInterrupt, Exception]) -def test_asynchecker_run(patches, raises): - checker = AsyncChecker() - - patched = patches( - "asyncio", - "BaseChecker.exit", - ("AsyncChecker._run", dict(new_callable=MagicMock)), - ("AsyncChecker.on_checks_complete", dict(new_callable=MagicMock)), - prefix="tools.base.checker") - - with patched as (m_async, m_exit, m_run, m_complete): - if raises: - m_run.side_effect = raises - - if raises == KeyboardInterrupt: - result = checker.run() - else: - with pytest.raises(raises): - checker.run() - return - else: - assert ( - checker.run() - == m_async.get_event_loop.return_value.run_until_complete.return_value) - - if raises == KeyboardInterrupt: - assert ( - list(m_exit.call_args) - == [(), {}]) - assert ( - list(m_async.get_event_loop.call_args_list[1]) - == [(), {}]) - assert ( - list(m_async.get_event_loop.return_value.run_until_complete.call_args) - == [(m_complete.return_value,), {}]) - assert ( - list(m_complete.call_args) - == [(), {}]) - assert result == m_async.get_event_loop.return_value.run_until_complete.return_value - return - - assert not m_exit.called - assert ( - list(m_async.get_event_loop.call_args) - == [(), {}]) - assert ( - list(m_async.get_event_loop.return_value.run_until_complete.call_args) - == [(m_run.return_value,), {}]) - assert ( - list(m_run.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_check_begin(patches): - checker = AsyncChecker() - patched = patches( - "BaseChecker.on_check_begin", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert not await checker.on_check_begin("CHECKNAME") - - assert ( - list(m_super.call_args) - == [('CHECKNAME',), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_check_run(patches): - checker = AsyncChecker() - patched = patches( - 
"BaseChecker.on_check_run", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert not await checker.on_check_run("CHECKNAME") - - assert ( - list(m_super.call_args) - == [('CHECKNAME',), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_checks_begin(patches): - checker = AsyncChecker() - patched = patches( - "BaseChecker.on_checks_begin", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert not await checker.on_checks_begin() - - assert ( - list(m_super.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_checks_complete(patches): - checker = AsyncChecker() - - patched = patches( - "BaseChecker.on_checks_complete", - prefix="tools.base.checker") - - with patched as (m_complete, ): - assert ( - await checker.on_checks_complete() - == m_complete.return_value) - - assert ( - list(m_complete.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [True, False]) -@pytest.mark.parametrize("exiting", [True, False]) -async def test_asynchecker__run(patches, raises, exiting): - _check1 = MagicMock() - _check2 = MagicMock() - _check3 = MagicMock() - - class AsyncCheckerWithChecks(AsyncChecker): - - async def check_check1(self): - return _check1() - - async def check_check2(self): - return _check2() - - async def check_check3(self): - return _check3() - - class SomeError(Exception): - pass - - checker = AsyncCheckerWithChecks() - - patched = patches( - "BaseChecker.log", - "BaseChecker.get_checks", - "AsyncChecker.on_checks_begin", - "AsyncChecker.on_check_begin", - "AsyncChecker.on_check_run", - "AsyncChecker.on_checks_complete", - ("AsyncChecker.exiting", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_log, m_checks, m_begin, m_check, m_run, m_complete, m_exit): - m_checks.return_value = ["check1", "check2", "check3"] - m_exit.return_value = exiting - if raises: - m_begin.side_effect = SomeError("AN ERROR OCCURRED") - - 
with pytest.raises(SomeError): - await checker._run() - elif exiting: - assert await checker._run() == 1 - else: - assert await checker._run() == m_complete.return_value - - assert ( - list(m_begin.call_args) - == [(), {}]) - - if exiting: - return - - assert ( - list(m_complete.call_args) - == [(), {}]) - - if raises: - return - - assert ( - list(m_checks.call_args) - == [(), {}]) - assert ( - list(list(c) for c in m_check.call_args_list) - == [[(f'check{i}',), {}] for i in range(1, 4)]) - for check in [_check1, _check2, _check3]: - assert ( - list(check.call_args) - == [(), {}]) - assert ( - list(list(c) for c in m_run.call_args_list) - == [[('check1',), {}], [('check2',), {}], [('check3',), {}]]) diff --git a/tools/base/tests/test_runner.py b/tools/base/tests/test_runner.py deleted file mode 100644 index 4b88cda46c080..0000000000000 --- a/tools/base/tests/test_runner.py +++ /dev/null @@ -1,710 +0,0 @@ -import importlib -import logging -import sys -from unittest.mock import AsyncMock, MagicMock, patch, PropertyMock - -import pytest - -from tools.base import runner - - -# this is necessary to fix coverage as these libs are imported before pytest -# is invoked -importlib.reload(runner) - - -class DummyRunner(runner.BaseRunner): - - def __init__(self): - self.args = PropertyMock() - - -class DummyForkingRunner(runner.ForkingRunner): - - def __init__(self): - self.args = PropertyMock() - - -class OneError(Exception): - - def __str__(self): - return "" - - pass - - -class TwoError(Exception): - pass - - -def _failing_runner(errors): - - class DummyFailingRunner: - # this dummy runner calls the _runner mock - # when its run/run_async methods are called - # and optionally raises some type of error - # to ensure they are caught as expected - - log = PropertyMock() - _runner = MagicMock() - - def __init__(self, raises=None): - self.raises = raises - - @runner.catches(errors) - def run(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if self.raises: - 
raise self.raises("AN ERROR OCCURRED") - return result - - @runner.catches(errors) - async def run_async(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if self.raises: - raise self.raises("AN ERROR OCCURRED") - return result - - return DummyFailingRunner - - -@pytest.mark.asyncio -@pytest.mark.parametrize("async_fun", [True, False]) -@pytest.mark.parametrize( - "errors", - [OneError, (OneError, TwoError)]) -@pytest.mark.parametrize( - "raises", - [None, OneError, TwoError]) -@pytest.mark.parametrize( - "args", - [(), ("ARG1", "ARG2")]) -@pytest.mark.parametrize( - "kwargs", - [{}, dict(key1="VAL1", key2="VAL2")]) -async def test_catches(errors, async_fun, raises, args, kwargs): - run = _failing_runner(errors)(raises) - should_fail = ( - raises - and not ( - raises == errors - or (isinstance(errors, tuple) - and raises in errors))) - - assert run.run.__wrapped__.__catches__ == errors - assert run.run_async.__wrapped__.__catches__ == errors - - if should_fail: - result = 1 - with pytest.raises(raises): - run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) - else: - result = run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) - - assert ( - list(run._runner.call_args) - == [args, kwargs]) - - if not should_fail and raises: - assert result == 1 - error = run.log.error.call_args[0][0] - _error = raises("AN ERROR OCCURRED") - assert ( - error - == (str(_error) or repr(_error))) - assert ( - list(run.log.error.call_args) - == [(error,), {}]) - else: - assert not run.log.error.called - - if raises: - assert result == 1 - else: - assert result == run._runner.return_value - - -def _cleanup_runner(async_fun, raises): - - class DummyCleanupRunner: - # this dummy runner calls the _runner mock - # when its run/async_fun methods are called - # and optionally raises some type of error - # to ensure they are caught as expected - - log = PropertyMock() - _runner = MagicMock() - - @runner.cleansup - 
def run(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if raises: - raise Exception("AN ERROR OCCURRED") - return result - - @runner.cleansup - async def run_async(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if raises: - raise Exception("AN ERROR OCCURRED") - return result - - return DummyCleanupRunner() - - -@pytest.mark.asyncio -@pytest.mark.parametrize("async_fun", [True, False]) -@pytest.mark.parametrize("raises", [True, False]) -async def test_cleansup(async_fun, raises): - run = _cleanup_runner(async_fun, raises) - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} - - assert run.run.__wrapped__.__cleansup__ is True - assert run.run_async.__wrapped__.__cleansup__ is True - - if async_fun: - run.cleanup = AsyncMock() - if raises: - with pytest.raises(Exception): - await run.run_async(*args, **kwargs) - else: - assert ( - await run.run_async(*args, **kwargs) - == run._runner.return_value) - else: - run.cleanup = MagicMock() - if raises: - with pytest.raises(Exception): - run.run(*args, **kwargs) - else: - assert ( - run.run(*args, **kwargs) - == run._runner.return_value) - - assert ( - list(run._runner.call_args) - == [tuple(args), kwargs]) - assert ( - list(run.cleanup.call_args) - == [(), {}]) - - -def test_base_runner_constructor(): - run = runner.BaseRunner("path1", "path2", "path3") - assert run._args == ("path1", "path2", "path3") - assert run.log_field_styles == runner.LOG_FIELD_STYLES - assert run.log_level_styles == runner.LOG_LEVEL_STYLES - assert run.log_fmt == runner.LOG_FMT - - -def test_base_runner_args(): - run = runner.BaseRunner("path1", "path2", "path3") - parser_mock = patch( - "tools.base.runner.BaseRunner.parser", - new_callable=PropertyMock) - - with parser_mock as m_parser: - assert run.args == m_parser.return_value.parse_known_args.return_value.__getitem__.return_value - - assert ( - list(m_parser.return_value.parse_known_args.call_args) - == [(('path1', 
'path2', 'path3'),), {}]) - assert ( - list(m_parser.return_value.parse_known_args.return_value.__getitem__.call_args) - == [(0,), {}]) - assert "args" in run.__dict__ - - -def test_base_runner_extra_args(): - run = runner.BaseRunner("path1", "path2", "path3") - parser_mock = patch( - "tools.base.runner.BaseRunner.parser", - new_callable=PropertyMock) - - with parser_mock as m_parser: - assert run.extra_args == m_parser.return_value.parse_known_args.return_value.__getitem__.return_value - - assert ( - list(m_parser.return_value.parse_known_args.call_args) - == [(('path1', 'path2', 'path3'),), {}]) - assert ( - list(m_parser.return_value.parse_known_args.return_value.__getitem__.call_args) - == [(1,), {}]) - assert "extra_args" in run.__dict__ - - -def test_base_runner_log(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "logging.getLogger", - "LogFilter", - "coloredlogs", - "verboselogs", - ("BaseRunner.log_level", dict(new_callable=PropertyMock)), - ("BaseRunner.log_level_styles", dict(new_callable=PropertyMock)), - ("BaseRunner.log_field_styles", dict(new_callable=PropertyMock)), - ("BaseRunner.log_fmt", dict(new_callable=PropertyMock)), - ("BaseRunner.name", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as patchy: - (m_logger, m_filter, m_color, m_verb, - m_level, m_lstyle, m_fstyle, m_fmt, m_name) = patchy - assert run.log == m_logger.return_value - - assert ( - list(m_verb.install.call_args) - == [(), {}]) - assert ( - list(m_logger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(m_logger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(m_color.install.call_args) - == [(), - {'fmt': m_fmt.return_value, - 'isatty': True, - 'field_styles': m_fstyle.return_value, - 'level': 'DEBUG', - 'level_styles': m_lstyle.return_value, - 'logger': m_logger.return_value}]) - assert "log" in run.__dict__ - - -def 
test_base_runner_log_level(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "dict", - ("BaseRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - with patched as (m_dict, m_args): - assert run.log_level == m_dict.return_value.__getitem__.return_value - - assert ( - list(m_dict.call_args) - == [(runner.LOG_LEVELS, ), {}]) - assert ( - list(m_dict.return_value.__getitem__.call_args) - == [(m_args.return_value.log_level,), {}]) - assert "log_level" in run.__dict__ - - -def test_base_runner_name(): - run = DummyRunner() - assert run.name == run.__class__.__name__ - assert "name" not in run.__dict__ - - -def test_base_runner_parser(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "argparse.ArgumentParser", - "BaseRunner.add_arguments", - prefix="tools.base.runner") - with patched as (m_parser, m_add_args): - assert run.parser == m_parser.return_value - - assert ( - list(m_parser.call_args) - == [(), {"allow_abbrev": False}]) - assert ( - list(m_add_args.call_args) - == [(m_parser.return_value,), {}]) - assert "parser" in run.__dict__ - - -def test_base_runner_path(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "pathlib", - prefix="tools.base.runner") - - with patched as (m_plib, ): - assert run.path == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(".", ), {}]) - - -def test_base_runner_stdout(patches): - run = runner.BaseRunner("path1", "path2", "path3") - - patched = patches( - "logging", - ("BaseRunner.log_level", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as (m_log, m_level): - assert run.stdout == m_log.getLogger.return_value - - assert ( - list(m_log.getLogger.call_args) - == [('stdout',), {}]) - assert ( - list(m_log.getLogger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(m_log.StreamHandler.call_args) - == 
[(sys.stdout,), {}]) - assert ( - list(m_log.Formatter.call_args) - == [('%(message)s',), {}]) - assert ( - list(m_log.StreamHandler.return_value.setFormatter.call_args) - == [(m_log.Formatter.return_value,), {}]) - assert ( - list(m_log.getLogger.return_value.addHandler.call_args) - == [(m_log.StreamHandler.return_value,), {}]) - - -@pytest.mark.parametrize("missing", [True, False]) -def test_base_runner_tempdir(patches, missing): - run = runner.BaseRunner() - patched = patches( - "tempfile", - ("BaseRunner.log", dict(new_callable=PropertyMock)), - ("BaseRunner._missing_cleanup", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as (m_tmp, m_log, m_missing): - m_missing.return_value = missing - assert run.tempdir == m_tmp.TemporaryDirectory.return_value - - if missing: - assert ( - list(m_log.return_value.warning.call_args) - == [("Tempdir created but instance has a `run` method which is not decorated with `@runner.cleansup`", ), {}]) - else: - assert not m_log.called - - assert ( - list(m_tmp.TemporaryDirectory.call_args) - == [(), {}]) - assert "tempdir" in run.__dict__ - - -def test_base_runner_add_arguments(): - run = runner.BaseRunner("path1", "path2", "path3") - parser = MagicMock() - - assert run.add_arguments(parser) is None - - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == [[('--log-level', '-l'), - {'choices': ['debug', 'info', 'warn', 'error'], - 'default': 'info', 'help': 'Log level to display'}], - ]) - - -@pytest.mark.parametrize("has_fun", [True, False]) -@pytest.mark.parametrize("is_wrapped", [True, False]) -@pytest.mark.parametrize("cleansup", [True, False]) -def test_base_runner__missing_cleanup(has_fun, is_wrapped, cleansup): - - def _runner_factory(): - if not has_fun: - return runner.BaseRunner() - - class _Wrap: - if cleansup: - __cleansup__ = True - - class _Wrapper: - if is_wrapped: - __wrapped__ = _Wrap() - - class DummyRunner(runner.BaseRunner): - run = _Wrapper() - - return 
DummyRunner() - - run = _runner_factory() - - assert ( - run._missing_cleanup - == (has_fun - and not (is_wrapped and cleansup))) - assert "_missing_cleanup" not in run.__dict__ - - -@pytest.mark.parametrize("cached", [True, False]) -def test_base_runner__cleanup_tempdir(patches, cached): - run = runner.BaseRunner() - patched = patches( - ("BaseRunner.tempdir", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - if cached: - run.__dict__["tempdir"] = "TEMPDIR" - - with patched as (m_temp, ): - assert not run._cleanup_tempdir() - - if cached: - assert ( - list(m_temp.return_value.cleanup.call_args) - == [(), {}]) - else: - assert not m_temp.called - assert "tempdir" not in run.__dict__ - - -# LogFilter tests -@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) -def test_base_runner_log_filter(level): - logfilter = runner.LogFilter() - - class DummyRecord: - levelno = level - - if level in [logging.DEBUG, logging.INFO]: - assert logfilter.filter(DummyRecord()) - else: - assert not logfilter.filter(DummyRecord()) - - -def test_runner_constructor(patches): - patched = patches( - "BaseRunner.__init__", - prefix="tools.base.runner") - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} - - with patched as (m_super, ): - m_super.return_value = None - run = runner.Runner(*args, **kwargs) - - assert isinstance(run, runner.BaseRunner) - assert ( - list(m_super.call_args) - == [tuple(args), kwargs]) - - -def test_runner_cleanup(patches): - run = runner.Runner() - patched = patches( - "Runner._cleanup_tempdir", - prefix="tools.base.runner") - - with patched as (m_temp, ): - assert not run.cleanup() - - assert ( - list(m_temp.call_args) - == [(), {}]) - - -def test_async_runner_constructor(patches): - patched = patches( - "BaseRunner.__init__", - prefix="tools.base.runner") - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} - 
- with patched as (m_super, ): - m_super.return_value = None - run = runner.AsyncRunner(*args, **kwargs) - - assert isinstance(run, runner.BaseRunner) - assert ( - list(m_super.call_args) - == [tuple(args), kwargs]) - - -@pytest.mark.asyncio -async def test_async_runner_cleanup(patches): - run = runner.AsyncRunner() - patched = patches( - "AsyncRunner._cleanup_tempdir", - prefix="tools.base.runner") - - with patched as (m_temp, ): - assert not await run.cleanup() - - assert ( - list(m_temp.call_args) - == [(), {}]) - - -# BazelAdapter tests - -def test_bazeladapter_constructor(): - run = DummyRunner() - adapter = runner.BazelAdapter(run) - assert adapter.context == run - - -@pytest.mark.parametrize("query_returns", [0, 1]) -def test_bazeladapter_query(query_returns): - run = DummyForkingRunner() - adapter = runner.BazelAdapter(run) - fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run") - - with fork_mock as m_fork: - m_fork.return_value.returncode = query_returns - if query_returns: - with pytest.raises(runner.BazelRunError) as result: - adapter.query("BAZEL QUERY") - else: - result = adapter.query("BAZEL QUERY") - - assert ( - list(m_fork.call_args) - == [(['bazel', 'query', "'BAZEL QUERY'"],), {}]) - - if query_returns: - assert result.errisinstance(runner.BazelRunError) - assert ( - result.value.args - == (f"Bazel query failed: {m_fork.return_value}",)) - assert not m_fork.return_value.stdout.decode.called - else: - assert ( - result - == m_fork.return_value.stdout.decode.return_value.split.return_value) - assert ( - list(m_fork.return_value.stdout.decode.call_args) - == [('utf-8',), {}]) - assert ( - list(m_fork.return_value.stdout.decode.return_value.split.call_args) - == [('\n',), {}]) - - -@pytest.mark.parametrize("cwd", [None, "", "SOMEPATH"]) -@pytest.mark.parametrize("raises", [None, True, False]) -@pytest.mark.parametrize("capture_output", [None, True, False]) -@pytest.mark.parametrize("run_returns", [0, 1]) -@pytest.mark.parametrize("args", 
[(), ("foo",), ("foo", "bar")]) -def test_bazeladapter_run(patches, run_returns, cwd, raises, args, capture_output): - run = DummyForkingRunner() - adapter = runner.BazelAdapter(run) - patched = patches( - "ForkingAdapter.subproc_run", - ("ForkingRunner.path", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - adapter_args = ("BAZEL RUN",) + args - kwargs = {} - if raises is not None: - kwargs["raises"] = raises - if cwd is not None: - kwargs["cwd"] = cwd - if capture_output is not None: - kwargs["capture_output"] = capture_output - - with patched as (m_fork, m_path): - m_fork.return_value.returncode = run_returns - if run_returns and (raises is not False): - with pytest.raises(runner.BazelRunError) as result: - adapter.run(*adapter_args, **kwargs) - else: - result = adapter.run(*adapter_args, **kwargs) - - call_args = (("--",) + args) if args else args - bazel_args = ("bazel", "run", "BAZEL RUN") + call_args - bazel_kwargs = {} - bazel_kwargs["capture_output"] = ( - True - if capture_output is True - else False) - bazel_kwargs["cwd"] = ( - cwd - if cwd - else m_path.return_value) - assert ( - list(m_fork.call_args) - == [(bazel_args,), bazel_kwargs]) - if run_returns and (raises is not False): - assert result.errisinstance(runner.BazelRunError) - assert ( - result.value.args - == (f"Bazel run failed: {m_fork.return_value}",)) - else: - assert result == m_fork.return_value - - -# ForkingAdapter tests - -def test_forkingadapter_constructor(): - run = DummyRunner() - adapter = runner.ForkingAdapter(run) - assert adapter.context == run - - -def test_forkingadapter_call(): - run = DummyRunner() - adapter = runner.ForkingAdapter(run) - fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run") - - with fork_mock as m_fork: - assert ( - adapter( - "arg1", "arg2", "arg3", - kwa1="foo", - kwa2="bar", - kwa3="baz") - == m_fork.return_value) - assert ( - list(m_fork.call_args) - == [('arg1', 'arg2', 'arg3'), - {'kwa1': 'foo', 'kwa2': 'bar', 'kwa3': 
'baz'}]) - - -@pytest.mark.parametrize("args", [(), ("a", "b")]) -@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"]) -@pytest.mark.parametrize("capture_output", ["NONE", True, False]) -def test_forkingadapter_subproc_run(patches, args, cwd, capture_output): - adapter = runner.ForkingAdapter(DummyRunner()) - patched = patches( - "subprocess.run", - ("BaseRunner.path", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as (m_run, m_path): - kwargs = {} - if cwd != "NONE": - kwargs["cwd"] = cwd - if capture_output != "NONE": - kwargs["capture_output"] = capture_output - assert adapter.subproc_run(*args, **kwargs) == m_run.return_value - - expected = {'capture_output': True, 'cwd': cwd} - if capture_output is False: - expected["capture_output"] = False - if cwd == "NONE": - expected["cwd"] = m_path.return_value - assert ( - list(m_run.call_args) - == [args, expected]) - - -# ForkingRunner tests - -def test_forkingrunner_fork(): - run = runner.ForkingRunner("path1", "path2", "path3") - forking_mock = patch("tools.base.runner.ForkingAdapter") - - with forking_mock as m_fork: - assert run.subproc_run == m_fork.return_value - assert ( - list(m_fork.call_args) - == [(run,), {}]) - assert "subproc_run" in run.__dict__ - - -# BazelRunner tests - -def test_bazelrunner_bazel(): - run = runner.BazelRunner("path1", "path2", "path3") - bazel_mock = patch("tools.base.runner.BazelAdapter") - - with bazel_mock as m_bazel: - assert run.bazel == m_bazel.return_value - assert ( - list(m_bazel.call_args) - == [(run,), {}]) - assert "bazel" in run.__dict__ diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py deleted file mode 100644 index 5ea95efb4ac3a..0000000000000 --- a/tools/base/tests/test_utils.py +++ /dev/null @@ -1,228 +0,0 @@ -import importlib -import sys -from contextlib import contextmanager -from unittest.mock import MagicMock - -import pytest - -from tools.base import utils - - -# this is necessary to fix coverage as 
these libs are imported before pytest -# is invoked -importlib.reload(utils) - - -def test_util_buffered_stdout(): - stdout = [] - - with utils.buffered(stdout=stdout): - print("test1") - print("test2") - sys.stdout.write("test3\n") - sys.stderr.write("error0\n") - - assert stdout == ["test1", "test2", "test3"] - - -def test_util_buffered_stderr(): - stderr = [] - - with utils.buffered(stderr=stderr): - print("test1") - print("test2") - sys.stdout.write("test3\n") - sys.stderr.write("error0\n") - sys.stderr.write("error1\n") - - assert stderr == ["error0", "error1"] - - -def test_util_buffered_stdout_stderr(): - stdout = [] - stderr = [] - - with utils.buffered(stdout=stdout, stderr=stderr): - print("test1") - print("test2") - sys.stdout.write("test3\n") - sys.stderr.write("error0\n") - sys.stderr.write("error1\n") - - assert stdout == ["test1", "test2", "test3"] - assert stderr == ["error0", "error1"] - - -def test_util_buffered_no_stdout_stderr(): - with pytest.raises(utils.BufferUtilError): - with utils.buffered(): - pass - - -def test_util_nested(): - - fun1_args = [] - fun2_args = [] - - @contextmanager - def fun1(arg): - fun1_args.append(arg) - yield "FUN1" - - @contextmanager - def fun2(arg): - fun2_args.append(arg) - yield "FUN2" - - with utils.nested(fun1("A"), fun2("B")) as (fun1_yield, fun2_yield): - assert fun1_yield == "FUN1" - assert fun2_yield == "FUN2" - - assert fun1_args == ["A"] - assert fun2_args == ["B"] - - -def test_util_coverage_with_data_file(patches): - patched = patches( - "ConfigParser", - "tempfile.TemporaryDirectory", - "os.path.join", - "open", - prefix="tools.base.utils") - - with patched as (m_config, m_tmp, m_join, m_open): - with utils.coverage_with_data_file("PATH") as tmprc: - assert tmprc == m_join.return_value - assert ( - list(m_config.call_args) - == [(), {}]) - assert ( - list(m_config.return_value.read.call_args) - == [('.coveragerc',), {}]) - assert ( - list(m_config.return_value.__getitem__.call_args) - == [('run',), 
{}]) - assert ( - list(m_config.return_value.__getitem__.return_value.__setitem__.call_args) - == [('data_file', 'PATH'), {}]) - assert ( - list(m_tmp.call_args) - == [(), {}]) - assert ( - list(m_join.call_args) - == [(m_tmp.return_value.__enter__.return_value, '.coveragerc'), {}]) - assert ( - list(m_open.call_args) - == [(m_join.return_value, 'w'), {}]) - assert ( - list(m_config.return_value.write.call_args) - == [(m_open.return_value.__enter__.return_value,), {}]) - - - -@pytest.mark.parametrize( - "tarballs", - [(), tuple("TARB{i}" for i in range(0, 3))]) -def test_util_extract(patches, tarballs): - patched = patches( - "nested", - "pathlib", - "tarfile.open", - prefix="tools.base.utils") - - with patched as (m_nested, m_plib, m_open): - _extractions = [MagicMock(), MagicMock()] - m_nested.return_value.__enter__.return_value = _extractions - - if tarballs: - assert utils.extract("PATH", *tarballs) == m_plib.Path.return_value - else: - with pytest.raises(utils.ExtractError) as e: - utils.extract("PATH", *tarballs) - - if not tarballs: - assert ( - e.value.args[0] - == 'No tarballs specified for extraction to PATH') - assert not m_nested.called - assert not m_open.called - for _extract in _extractions: - assert not _extract.extractall.called - return - - assert ( - list(m_plib.Path.call_args) - == [("PATH", ), {}]) - - for _extract in _extractions: - assert ( - list(_extract.extractall.call_args) - == [(), dict(path="PATH")]) - - assert ( - list(m_open.call_args_list) - == [[(tarb, ), {}] for tarb in tarballs]) - assert ( - list(m_nested.call_args) - == [tuple(m_open.return_value for x in tarballs), {}]) - - -@pytest.mark.parametrize( - "tarballs", - [(), tuple("TARB{i}" for i in range(0, 3))]) -def test_util_untar(patches, tarballs): - patched = patches( - "tempfile.TemporaryDirectory", - "extract", - prefix="tools.base.utils") - - with patched as (m_tmp, m_extract): - with utils.untar(*tarballs) as tmpdir: - assert tmpdir == m_extract.return_value - - assert 
( - list(m_tmp.call_args) - == [(), {}]) - assert ( - list(m_extract.call_args) - == [(m_tmp.return_value.__enter__.return_value, ) + tarballs, {}]) - - -def test_util_from_yaml(patches): - patched = patches( - "pathlib", - "yaml", - prefix="tools.base.utils") - - with patched as (m_plib, m_yaml): - assert utils.from_yaml("PATH") == m_yaml.safe_load.return_value - - assert ( - list(m_plib.Path.call_args) - == [("PATH", ), {}]) - assert ( - list(m_yaml.safe_load.call_args) - == [(m_plib.Path.return_value.read_text.return_value, ), {}]) - assert ( - list(m_plib.Path.return_value.read_text.call_args) - == [(), {}]) - - -def test_util_to_yaml(patches): - patched = patches( - "pathlib", - "yaml", - prefix="tools.base.utils") - - with patched as (m_plib, m_yaml): - assert utils.to_yaml("DATA", "PATH") == m_plib.Path.return_value - - assert ( - list(m_yaml.dump.call_args) - == [("DATA", ), {}]) - assert ( - list(m_plib.Path.return_value.write_text.call_args) - == [(m_yaml.dump.return_value, ), {}]) - assert ( - list(m_plib.Path.call_args) - == [("PATH", ), {}]) diff --git a/tools/base/utils.py b/tools/base/utils.py deleted file mode 100644 index ca92cdc4e78be..0000000000000 --- a/tools/base/utils.py +++ /dev/null @@ -1,142 +0,0 @@ -# -# Provides shared utils used by other python modules -# - -import io -import os -import pathlib -import tarfile -import tempfile -from configparser import ConfigParser -from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout -from pathlib import Path -from typing import Callable, ContextManager, Iterator, List, Optional, Union - -import yaml - - -class ExtractError(Exception): - pass - - -# this is testing specific - consider moving to tools.testing.utils -@contextmanager -def coverage_with_data_file(data_file: str) -> Iterator[str]: - """This context manager takes the path of a data file - and creates a custom coveragerc with the data file path included. - - The context is yielded the path to the custom rc file. 
- """ - parser = ConfigParser() - parser.read(".coveragerc") - parser["run"]["data_file"] = data_file - # use a temporary .coveragerc - with tempfile.TemporaryDirectory() as tmpdir: - tmprc = os.path.join(tmpdir, ".coveragerc") - with open(tmprc, "w") as f: - parser.write(f) - yield tmprc - - -class BufferUtilError(Exception): - pass - - -@contextmanager -def nested(*contexts): - with ExitStack() as stack: - yield [stack.enter_context(context) for context in contexts] - - -@contextmanager -def buffered( - stdout: list = None, - stderr: list = None, - mangle: Optional[Callable[[list], list]] = None) -> Iterator[None]: - """Captures stdout and stderr and feeds lines to supplied lists""" - - mangle = mangle or (lambda lines: lines) - - if stdout is None and stderr is None: - raise BufferUtilError("You must specify stdout and/or stderr") - - contexts: List[Union[redirect_stderr[io.StringIO], redirect_stdout[io.StringIO]]] = [] - - if stdout is not None: - _stdout = io.StringIO() - contexts.append(redirect_stdout(_stdout)) - if stderr is not None: - _stderr = io.StringIO() - contexts.append(redirect_stderr(_stderr)) - - with nested(*contexts): - yield - - if stdout is not None: - _stdout.seek(0) - stdout.extend(mangle(_stdout.read().strip().split("\n"))) - if stderr is not None: - _stderr.seek(0) - stderr.extend(mangle(_stderr.read().strip().split("\n"))) - - -def extract(path: Union[pathlib.Path, str], *tarballs: Union[pathlib.Path, str]) -> pathlib.Path: - if not tarballs: - raise ExtractError(f"No tarballs specified for extraction to {path}") - openers = nested(*tuple(tarfile.open(tarball) for tarball in tarballs)) - - with openers as tarfiles: - for tar in tarfiles: - tar.extractall(path=path) - return pathlib.Path(path) - - -@contextmanager -def untar(*tarballs: Union[pathlib.Path, str]) -> Iterator[pathlib.Path]: - """Untar a tarball into a temporary directory - - for example to list the contents of a tarball: - - ``` - import os - - from tooling.base.utils import 
untar - - - with untar("path/to.tar") as tmpdir: - print(os.listdir(tmpdir)) - - ``` - - the created temp directory will be cleaned up on - exiting the contextmanager - - """ - with tempfile.TemporaryDirectory() as tmpdir: - yield extract(tmpdir, *tarballs) - - -def from_yaml(path: Union[pathlib.Path, str]) -> Union[dict, list, str, int]: - """Returns the loaded python object from a yaml file given by `path`""" - return yaml.safe_load(pathlib.Path(path).read_text()) - - -def to_yaml(data: Union[dict, list, str, int], path: Union[pathlib.Path, str]) -> pathlib.Path: - """For given `data` dumps as yaml to provided `path`. - - Returns `path` - """ - path = pathlib.Path(path) - path.write_text(yaml.dump(data)) - return path - - -@contextmanager -def cd_and_return(path: Union[pathlib.Path, str]) -> ContextManager[None]: - """Changes working directory to given path and returns to previous working directory on exit""" - prev_cwd = Path.cwd() - try: - os.chdir(path) - yield - finally: - os.chdir(prev_cwd) diff --git a/tools/clang_tools/api_booster/BUILD b/tools/clang_tools/api_booster/BUILD deleted file mode 100644 index d6affe19640b6..0000000000000 --- a/tools/clang_tools/api_booster/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -load( - "//clang_tools/support:clang_tools.bzl", - "clang_tools_cc_binary", - "clang_tools_cc_library", - "clang_tools_cc_test", -) - -licenses(["notice"]) # Apache 2 - -clang_tools_cc_binary( - name = "api_booster", - srcs = ["main.cc"], - deps = [ - ":proto_cxx_utils_lib", - "@clang_tools//:clang_astmatchers", - "@clang_tools//:clang_basic", - "@clang_tools//:clang_tooling", - "@envoy//tools/type_whisperer:api_type_db_lib", - ], -) - -clang_tools_cc_library( - name = "proto_cxx_utils_lib", - srcs = ["proto_cxx_utils.cc"], - hdrs = ["proto_cxx_utils.h"], - deps = [ - "@com_google_absl//absl/container:node_hash_map", - "@com_google_absl//absl/strings", - "@com_google_absl//absl/types:optional", - ], -) - -clang_tools_cc_test( - name = 
"proto_cxx_utils_test", - srcs = ["proto_cxx_utils_test.cc"], - deps = [":proto_cxx_utils_lib"], -) diff --git a/tools/clang_tools/api_booster/main.cc b/tools/clang_tools/api_booster/main.cc deleted file mode 100644 index 1972a58d560c6..0000000000000 --- a/tools/clang_tools/api_booster/main.cc +++ /dev/null @@ -1,598 +0,0 @@ -// Upgrade a single Envoy C++ file to the latest API version. -// -// Currently this tool is a WIP and only does inference of .pb[.validate].h -// #include locations. This already exercises some of the muscles we need, such -// as AST matching, rudimentary type inference and API type database lookup. -// -// NOLINT(namespace-envoy) - -#include -#include -#include -#include - -// Declares clang::SyntaxOnlyAction. -#include "clang/ASTMatchers/ASTMatchers.h" -#include "clang/ASTMatchers/ASTMatchFinder.h" -#include "clang/Frontend/FrontendActions.h" -#include "clang/Tooling/CommonOptionsParser.h" -#include "clang/Tooling/Core/Replacement.h" -#include "clang/Tooling/Refactoring.h" -#include "clang/Tooling/ReplacementsYaml.h" - -// Declares llvm::cl::extrahelp. -#include "llvm/Support/CommandLine.h" - -#include "proto_cxx_utils.h" - -#include "tools/type_whisperer/api_type_db.h" - -#include "absl/container/node_hash_map.h" -#include "absl/strings/str_cat.h" - -// Enable to see debug log messages. -#ifdef ENABLE_DEBUG_LOG -#define DEBUG_LOG(s) \ - do { \ - std::cerr << (s) << std::endl; \ - } while (0) -#else -#define DEBUG_LOG(s) -#endif - -using namespace Envoy::Tools::TypeWhisperer; - -namespace ApiBooster { - -class ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback, - public clang::tooling::SourceFileCallbacks { -public: - ApiBooster(std::map& replacements) - : replacements_(replacements) {} - - // AST match callback dispatcher. 
- void run(const clang::ast_matchers::MatchFinder::MatchResult& match_result) override { - clang::SourceManager& source_manager = match_result.Context->getSourceManager(); - DEBUG_LOG("AST match callback dispatcher"); - for (const auto it : match_result.Nodes.getMap()) { - const std::string match_text = getSourceText(it.second.getSourceRange(), source_manager); - const clang::SourceRange spelling_range = - getSpellingRange(it.second.getSourceRange(), source_manager); - const std::string spelling_text = getSourceText(spelling_range, source_manager); - DEBUG_LOG(absl::StrCat(" Result for ", it.first, " [", truncateForDebug(match_text), "]")); - if (match_text != spelling_text) { - DEBUG_LOG(absl::StrCat(" with spelling text [", truncateForDebug(spelling_text), "]")); - } - } - if (const auto* type_loc = match_result.Nodes.getNodeAs("type")) { - onTypeLocMatch(*type_loc, source_manager); - return; - } - if (const auto* using_decl = match_result.Nodes.getNodeAs("using_decl")) { - onUsingDeclMatch(*using_decl, source_manager); - return; - } - if (const auto* decl_ref_expr = - match_result.Nodes.getNodeAs("decl_ref_expr")) { - onDeclRefExprMatch(*decl_ref_expr, *match_result.Context, source_manager); - return; - } - if (const auto* call_expr = match_result.Nodes.getNodeAs("call_expr")) { - onCallExprMatch(*call_expr, *match_result.Context, source_manager); - return; - } - if (const auto* member_call_expr = - match_result.Nodes.getNodeAs("member_call_expr")) { - onMemberCallExprMatch(*member_call_expr, source_manager); - return; - } - if (const auto* tmpl = - match_result.Nodes.getNodeAs("tmpl")) { - onClassTemplateSpecializationDeclMatch(*tmpl, source_manager); - return; - } - } - - // Visitor callback for start of a compilation unit. - bool handleBeginSource(clang::CompilerInstance& CI) override { - source_api_proto_paths_.clear(); - return true; - } - - // Visitor callback for end of a compilation unit. 
- void handleEndSource() override { - // Dump known API header paths to stdout for api_boost.py to rewrite with - // (no rewriting support in this tool yet). - for (const std::string& proto_path : source_api_proto_paths_) { - std::cout << proto_path << std::endl; - } - } - -private: - static bool isEnvoyNamespace(absl::string_view s) { - return absl::StartsWith(s, "envoy::") || absl::StartsWith(s, "::envoy::"); - } - - static std::string truncateForDebug(const std::string& text) { - const uint32_t MaxExpansionChars = 250; - return text.size() > MaxExpansionChars ? text.substr(0, MaxExpansionChars) + "..." : text; - } - - // Match callback for TypeLoc. These are explicit mentions of the type in the - // source. If we have a match on type, we should track the corresponding .pb.h - // and attempt to upgrade. - void onTypeLocMatch(const clang::TypeLoc& type_loc, const clang::SourceManager& source_manager) { - absl::optional source_range; - const std::string type_name = - type_loc.getType().getCanonicalType().getUnqualifiedType().getAsString(); - // Remove qualifiers, e.g. const. - const clang::UnqualTypeLoc unqual_type_loc = type_loc.getUnqualifiedLoc(); - DEBUG_LOG(absl::StrCat("Type class ", type_loc.getType()->getTypeClassName())); - // Today we are only smart enough to rewrite ElaborateTypeLoc, which are - // full namespace prefixed types. We probably will need to support more, in - // particular if we want message-level type renaming. TODO(htuch): add more - // supported AST TypeLoc classes as needed. 
- if (unqual_type_loc.getTypeLocClass() == clang::TypeLoc::Elaborated && - isEnvoyNamespace(getSourceText( - getSpellingRange(unqual_type_loc.getSourceRange(), source_manager), source_manager))) { - source_range = absl::make_optional(unqual_type_loc.getSourceRange()); - tryBoostType(type_name, source_range, source_manager, type_loc.getType()->getTypeClassName(), - false); - } else { - // If we're not going to rewrite, we still deliver SourceLocation to - // tryBoostType to assist with determination of API_NO_BOOST(). - tryBoostType(type_name, unqual_type_loc.getBeginLoc(), -1, source_manager, - type_loc.getType()->getTypeClassName(), false); - } - } - - // Match callback for clang::UsingDecl. These are 'using' aliases for API type - // names. - void onUsingDeclMatch(const clang::UsingDecl& using_decl, - const clang::SourceManager& source_manager) { - // Not all using declaration are types, but we try the rewrite in case there - // is such an API type database match. - const clang::SourceRange source_range = clang::SourceRange( - using_decl.getQualifierLoc().getBeginLoc(), using_decl.getNameInfo().getEndLoc()); - const std::string type_name = getSourceText(source_range, source_manager); - tryBoostType(type_name, source_range, source_manager, "UsingDecl", true); - } - - // Match callback for clang::DeclRefExpr. These occur when enums constants, - // e.g. foo::bar::kBaz, appear in the source. - void onDeclRefExprMatch(const clang::DeclRefExpr& decl_ref_expr, const clang::ASTContext& context, - const clang::SourceManager& source_manager) { - // We don't need to consider non-namespace qualified DeclRefExprfor now (no - // renaming support yet). - if (!decl_ref_expr.hasQualifier()) { - return; - } - const std::string decl_name = decl_ref_expr.getNameInfo().getAsString(); - // There are generated methods to stringify/parse/validate enum values, - // these need special treatment as they look like types with special - // suffices. 
- for (const std::string& enum_generated_method_suffix : {"_Name", "_Parse", "_IsValid"}) { - if (absl::EndsWith(decl_name, enum_generated_method_suffix)) { - // Remove trailing suffix from reference for replacement range and type - // name purposes. - const clang::SourceLocation begin_loc = - source_manager.getSpellingLoc(decl_ref_expr.getBeginLoc()); - const std::string type_name_with_suffix = - getSourceText(decl_ref_expr.getSourceRange(), source_manager); - const std::string type_name = type_name_with_suffix.substr( - 0, type_name_with_suffix.size() - enum_generated_method_suffix.size()); - tryBoostType(type_name, begin_loc, type_name.size(), source_manager, - "DeclRefExpr suffixed " + enum_generated_method_suffix, false); - return; - } - } - // Remove trailing : from namespace qualifier. - const clang::SourceRange source_range = - clang::SourceRange(decl_ref_expr.getQualifierLoc().getBeginLoc(), - decl_ref_expr.getQualifierLoc().getEndLoc().getLocWithOffset(-1)); - // Only try to boost type if it's explicitly an Envoy qualified type. - const std::string source_type_name = getSourceText(source_range, source_manager); - const clang::QualType ast_type = - decl_ref_expr.getDecl()->getType().getCanonicalType().getUnqualifiedType(); - const std::string ast_type_name = ast_type.getAsString(); - if (isEnvoyNamespace(source_type_name)) { - // Generally we pull the type from the named entity's declaration type, - // since this allows us to map from things like envoy::type::HTTP2 to the - // underlying fully qualified envoy::type::CodecClientType::HTTP2 prior to - // API type database lookup. However, for the generated static methods or - // field accessors, we don't want to deal with lookup via the function - // type, so we use the source text directly. - const std::string type_name = ast_type.isPODType(context) ? 
ast_type_name : source_type_name; - tryBoostType(type_name, source_range, source_manager, "DeclRefExpr", true); - } - const auto latest_type_info = getTypeInformationFromCType(ast_type_name, true); - // In some cases we need to upgrade the name the DeclRefExpr points at. If - // this isn't a known API type, our work here is done. - if (!latest_type_info) { - return; - } - const clang::SourceRange decl_source_range = decl_ref_expr.getNameInfo().getSourceRange(); - // Deprecated enum constants need to be upgraded. - if (latest_type_info->enum_type_) { - const auto enum_value_rename = - ProtoCxxUtils::renameEnumValue(decl_name, latest_type_info->renames_); - if (enum_value_rename) { - const clang::SourceRange decl_source_range = decl_ref_expr.getNameInfo().getSourceRange(); - const clang::tooling::Replacement enum_value_replacement( - source_manager, source_manager.getSpellingLoc(decl_source_range.getBegin()), - sourceRangeLength(decl_source_range, source_manager), *enum_value_rename); - insertReplacement(enum_value_replacement); - } - return; - } - // We need to map from envoy::type::matcher::StringMatcher::kRegex to - // envoy::type::matcher::v3::StringMatcher::kHiddenEnvoyDeprecatedRegex. - const auto constant_rename = - ProtoCxxUtils::renameConstant(decl_name, latest_type_info->renames_); - if (constant_rename) { - const clang::tooling::Replacement constant_replacement( - source_manager, decl_source_range.getBegin(), - sourceRangeLength(decl_source_range, source_manager), *constant_rename); - insertReplacement(constant_replacement); - } - } - - // Match callback clang::CallExpr. We don't need to rewrite, but if it's something like - // loadFromYamlAndValidate, we might need to look at the argument type to - // figure out any corresponding .pb.validate.h we require. 
- void onCallExprMatch(const clang::CallExpr& call_expr, const clang::ASTContext& context, - const clang::SourceManager& source_manager) { - auto* direct_callee = call_expr.getDirectCallee(); - if (direct_callee != nullptr) { - const absl::node_hash_map ValidateNameToArg = { - {"loadFromYamlAndValidate", 1}, - {"loadFromFileAndValidate", 1}, - {"downcastAndValidate", -1}, - {"validate", 0}, - }; - const std::string& callee_name = direct_callee->getNameInfo().getName().getAsString(); - DEBUG_LOG(absl::StrCat("callee_name ", callee_name)); - const auto arg = ValidateNameToArg.find(callee_name); - // Sometimes we hit false positives because we aren't qualifying above. - // TODO(htuch): fix this. - if (arg != ValidateNameToArg.end() && - arg->second < static_cast(call_expr.getNumArgs())) { - const std::string type_name = arg->second >= 0 ? call_expr.getArg(arg->second) - ->getType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString() - : call_expr.getCallReturnType(context) - .getNonReferenceType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - DEBUG_LOG(absl::StrCat("Validation header boosting ", type_name)); - tryBoostType(type_name, {}, source_manager, "validation invocation", true, true); - } - } - } - - // Match callback for clang::CxxMemberCallExpr. We rewrite things like - // ->mutable_foo() to ->mutable_foo_new_name() during renames. - void onMemberCallExprMatch(const clang::CXXMemberCallExpr& member_call_expr, - const clang::SourceManager& source_manager) { - const std::string type_name = - member_call_expr.getObjectType().getCanonicalType().getUnqualifiedType().getAsString(); - const auto latest_type_info = getTypeInformationFromCType(type_name, true); - // If this isn't a known API type, our work here is done. - if (!latest_type_info) { - return; - } - // Figure out if the referenced object was declared under API_NO_BOOST. This - // only works for simple cases, best effort. 
- const auto* object_expr = member_call_expr.getImplicitObjectArgument(); - if (object_expr != nullptr) { - const auto* decl = object_expr->getReferencedDeclOfCallee(); - if (decl != nullptr && - getSourceText(decl->getSourceRange(), source_manager).find("API_NO_BOOST") != - std::string::npos) { - DEBUG_LOG("Skipping method replacement due to API_NO_BOOST"); - return; - } - } - tryRenameMethod(*latest_type_info, member_call_expr.getExprLoc(), source_manager); - } - - bool tryRenameMethod(const TypeInformation& type_info, clang::SourceLocation method_loc, - const clang::SourceManager& source_manager) { - const clang::SourceRange source_range = {source_manager.getSpellingLoc(method_loc), - source_manager.getSpellingLoc(method_loc)}; - const std::string method_name = getSourceText(source_range, source_manager); - DEBUG_LOG(absl::StrCat("Checking for rename of ", method_name)); - const auto method_rename = ProtoCxxUtils::renameMethod(method_name, type_info.renames_); - if (method_rename) { - const clang::tooling::Replacement method_replacement( - source_manager, source_range.getBegin(), sourceRangeLength(source_range, source_manager), - *method_rename); - insertReplacement(method_replacement); - return true; - } - return false; - } - - // Match callback for clang::ClassTemplateSpecializationDecl. An additional - // place we need to look for .pb.validate.h reference is instantiation of - // FactoryBase. 
- void onClassTemplateSpecializationDeclMatch(const clang::ClassTemplateSpecializationDecl& tmpl, - const clang::SourceManager& source_manager) { - const std::string tmpl_type_name = tmpl.getSpecializedTemplate() - ->getInjectedClassNameSpecialization() - .getCanonicalType() - .getAsString(); - if (absl::EndsWith(tmpl_type_name, "FactoryBase")) { - const std::string type_name = tmpl.getTemplateArgs() - .get(0) - .getAsType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - tryBoostType(type_name, {}, source_manager, "FactoryBase template", true, true); - } - if (tmpl_type_name == "FactoryBase") { - const std::string type_name_0 = tmpl.getTemplateArgs() - .get(0) - .getAsType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - tryBoostType(type_name_0, {}, source_manager, "FactoryBase template", true, true); - const std::string type_name_1 = tmpl.getTemplateArgs() - .get(1) - .getAsType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - tryBoostType(type_name_1, {}, source_manager, "FactoryBase template", true, true); - } - } - - // Attempt to boost a given type and rewrite the given source range. 
- void tryBoostType(const std::string& type_name, absl::optional source_range, - const clang::SourceManager& source_manager, absl::string_view debug_description, - bool requires_enum_truncation, bool validation_required = false) { - if (source_range) { - tryBoostType(type_name, source_range->getBegin(), - sourceRangeLength(*source_range, source_manager), source_manager, - debug_description, requires_enum_truncation, validation_required); - } else { - tryBoostType(type_name, {}, -1, source_manager, debug_description, requires_enum_truncation, - validation_required); - } - } - - bool underApiNoBoost(clang::SourceLocation loc, const clang::SourceManager& source_manager) { - if (loc.isMacroID()) { - const auto macro_name = clang::Lexer::getImmediateMacroName(loc, source_manager, lexer_lopt_); - if (macro_name.str() == "API_NO_BOOST") { - return true; - } - } - return false; - } - - void tryBoostType(const std::string& type_name, clang::SourceLocation begin_loc, int length, - const clang::SourceManager& source_manager, absl::string_view debug_description, - bool requires_enum_truncation, bool validation_required = false) { - bool is_skip_macro = false; - if (underApiNoBoost(begin_loc, source_manager)) { - DEBUG_LOG("Skipping replacement due to API_NO_BOOST"); - is_skip_macro = true; - } - const auto type_info = getTypeInformationFromCType(type_name, !is_skip_macro); - // If this isn't a known API type, our work here is done. - if (!type_info) { - return; - } - DEBUG_LOG(absl::StrCat("Matched type '", type_name, "' (", debug_description, ") length ", - length, " at ", begin_loc.printToString(source_manager))); - // Track corresponding imports. - source_api_proto_paths_.insert(adjustProtoSuffix(type_info->proto_path_, ".pb.h")); - if (validation_required) { - source_api_proto_paths_.insert(adjustProtoSuffix(type_info->proto_path_, ".pb.validate.h")); - } - // Not all AST matchers know how to do replacements (yet?). 
- if (length == -1 || is_skip_macro) { - return; - } - const clang::SourceLocation spelling_begin = source_manager.getSpellingLoc(begin_loc); - // We need to look at the text we're replacing to decide whether we should - // use the qualified C++'ified proto name. - const bool qualified = - getSourceText(spelling_begin, length, source_manager).find("::") != std::string::npos; - std::string case_residual; - if (absl::EndsWith(type_name, "Case")) { - case_residual = type_name.substr(type_name.rfind(':') - 1); - } - // Add corresponding replacement. - const clang::tooling::Replacement type_replacement( - source_manager, source_manager.getSpellingLoc(begin_loc), length, - ProtoCxxUtils::protoToCxxType(type_info->type_name_, qualified, - type_info->enum_type_ && requires_enum_truncation) + - case_residual); - insertReplacement(type_replacement); - } - - void insertReplacement(const clang::tooling::Replacement& replacement) { - llvm::Error error = replacements_[std::string(replacement.getFilePath())].add(replacement); - if (error) { - std::cerr << " Replacement insertion error: " << llvm::toString(std::move(error)) - << std::endl; - } else { - std::cerr << " Replacement added: " << replacement.toString() << std::endl; - } - } - - // Modeled after getRangeSize() in Clang's Replacements.cpp. Turns out it's - // non-trivial to get the actual length of a SourceRange, as the end location - // point to the start of the last token. 
- int sourceRangeLength(clang::SourceRange source_range, - const clang::SourceManager& source_manager) { - const clang::SourceLocation spelling_begin = - source_manager.getSpellingLoc(source_range.getBegin()); - const clang::SourceLocation spelling_end = source_manager.getSpellingLoc(source_range.getEnd()); - std::pair start = source_manager.getDecomposedLoc(spelling_begin); - std::pair end = source_manager.getDecomposedLoc(spelling_end); - if (start.first != end.first) { - return -1; - } - end.second += clang::Lexer::MeasureTokenLength(spelling_end, source_manager, lexer_lopt_); - return end.second - start.second; - } - - std::string getSourceText(clang::SourceLocation begin_loc, int size, - const clang::SourceManager& source_manager) { - return std::string(clang::Lexer::getSourceText( - {clang::SourceRange(begin_loc, begin_loc.getLocWithOffset(size)), false}, source_manager, - lexer_lopt_, 0)); - } - - std::string getSourceText(clang::SourceRange source_range, - const clang::SourceManager& source_manager) { - return std::string(clang::Lexer::getSourceText( - clang::CharSourceRange::getTokenRange(source_range), source_manager, lexer_lopt_, 0)); - } - - void addNamedspaceQualifiedTypeReplacement() {} - - // Remove .proto from a path, apply specified suffix instead. - std::string adjustProtoSuffix(absl::string_view proto_path, absl::string_view suffix) { - return absl::StrCat(proto_path.substr(0, proto_path.size() - 6), suffix); - } - - // Obtain the latest type information for a given from C++ type, e.g. envoy:config::v2::Cluster, - // from the API type database. - absl::optional getTypeInformationFromCType(const std::string& c_type_name, - bool latest) { - // Ignore compound or non-API types. - // TODO(htuch): this is all super hacky and not really right, we should be - // removing qualifiers etc. to get to the underlying type name. 
- const std::string type_name = std::regex_replace(c_type_name, std::regex("^(class|enum) "), ""); - if (!isEnvoyNamespace(type_name) || absl::StrContains(type_name, " ")) { - return {}; - } - const std::string proto_type_name = ProtoCxxUtils::cxxToProtoType(type_name); - - // Use API type database to map from proto type to path. - auto result = latest ? ApiTypeDb::getLatestTypeInformation(proto_type_name) - : ApiTypeDb::getExistingTypeInformation(proto_type_name); - if (result) { - // Remove the .proto extension. - return result; - } else if (!absl::StartsWith(proto_type_name, "envoy.HotRestart") && - !absl::StartsWith(proto_type_name, "envoy.RouterCheckToolSchema") && - !absl::StartsWith(proto_type_name, "envoy.annotations") && - !absl::StartsWith(proto_type_name, "envoy.test") && - !absl::StartsWith(proto_type_name, "envoy.tracers.xray.daemon")) { - // Die hard if we don't have a useful proto type for something that looks - // like an API type(modulo a short allowlist). - std::cerr << "Unknown API type: " << proto_type_name << std::endl; - // TODO(htuch): maybe there is a nicer way to terminate AST traversal? - ::exit(1); - } - - return {}; - } - - static clang::SourceRange getSpellingRange(clang::SourceRange source_range, - const clang::SourceManager& source_manager) { - return {source_manager.getSpellingLoc(source_range.getBegin()), - source_manager.getSpellingLoc(source_range.getEnd())}; - } - - // Set of inferred .pb[.validate].h, updated as the AST matcher callbacks above fire. - std::set source_api_proto_paths_; - // Map from source file to replacements. - std::map& replacements_; - // Language options for interacting with Lexer. Currently empty. - clang::LangOptions lexer_lopt_; -}; // namespace ApiBooster - -} // namespace ApiBooster - -int main(int argc, const char** argv) { - // Apply a custom category to all command-line options so that they are the - // only ones displayed. 
- llvm::cl::OptionCategory api_booster_tool_category("api-booster options"); - - clang::tooling::CommonOptionsParser options_parser(argc, argv, api_booster_tool_category); - clang::tooling::RefactoringTool tool(options_parser.getCompilations(), - options_parser.getSourcePathList()); - - ApiBooster::ApiBooster api_booster(tool.getReplacements()); - clang::ast_matchers::MatchFinder finder; - - // Match on all mentions of types in the AST. - auto type_matcher = - clang::ast_matchers::typeLoc(clang::ast_matchers::isExpansionInMainFile()).bind("type"); - finder.addMatcher(type_matcher, &api_booster); - - // Match on all "using" declarations. - auto using_decl_matcher = - clang::ast_matchers::usingDecl(clang::ast_matchers::isExpansionInMainFile()) - .bind("using_decl"); - finder.addMatcher(using_decl_matcher, &api_booster); - - // Match on references to enum constants. - auto decl_ref_expr_matcher = - clang::ast_matchers::declRefExpr(clang::ast_matchers::isExpansionInMainFile()) - .bind("decl_ref_expr"); - finder.addMatcher(decl_ref_expr_matcher, &api_booster); - - // Match on all call expressions. We are interested in particular in calls - // where validation on protos is performed. - auto call_matcher = - clang::ast_matchers::callExpr(clang::ast_matchers::isExpansionInMainFile()).bind("call_expr"); - finder.addMatcher(call_matcher, &api_booster); - - // Match on all .foo() or ->foo() expressions. We are interested in these for renames - // and deprecations. - auto member_call_expr = - clang::ast_matchers::cxxMemberCallExpr(clang::ast_matchers::isExpansionInMainFile()) - .bind("member_call_expr"); - finder.addMatcher(member_call_expr, &api_booster); - - // Match on all template instantiations. We are interested in particular in - // instantiations of factories where validation on protos is performed. 
- auto tmpl_matcher = clang::ast_matchers::classTemplateSpecializationDecl( - clang::ast_matchers::matchesName(".*FactoryBase.*")) - .bind("tmpl"); - finder.addMatcher(tmpl_matcher, &api_booster); - - // Apply ApiBooster to AST matches. This will generate a set of replacements in - // tool.getReplacements(). - const int run_result = tool.run(newFrontendActionFactory(&finder, &api_booster).get()); - if (run_result != 0) { - std::cerr << "Exiting with non-zero result " << run_result << std::endl; - return run_result; - } - - // Serialize replacements to

.clang-replacements.yaml. - // These are suitable for consuming by clang-apply-replacements. - for (const auto& file_replacement : tool.getReplacements()) { - // Populate TranslationUnitReplacements from file replacements (this is what - // there exists llvm::yaml serialization support for). - clang::tooling::TranslationUnitReplacements tu_replacements; - tu_replacements.MainSourceFile = file_replacement.first; - for (const auto& r : file_replacement.second) { - tu_replacements.Replacements.push_back(r); - DEBUG_LOG(r.toString()); - } - // Serialize TranslationUnitReplacements to YAML. - std::string yaml_content; - llvm::raw_string_ostream yaml_content_stream(yaml_content); - llvm::yaml::Output yaml(yaml_content_stream); - yaml << tu_replacements; - // Write to
.clang-replacements.yaml. - std::ofstream serialized_replacement_file(tu_replacements.MainSourceFile + - ".clang-replacements.yaml"); - serialized_replacement_file << yaml_content_stream.str(); - } - - return 0; -} diff --git a/tools/clang_tools/api_booster/proto_cxx_utils.cc b/tools/clang_tools/api_booster/proto_cxx_utils.cc deleted file mode 100644 index 194bdc0e6bf7b..0000000000000 --- a/tools/clang_tools/api_booster/proto_cxx_utils.cc +++ /dev/null @@ -1,102 +0,0 @@ -#include "proto_cxx_utils.h" - -namespace ApiBooster { - -std::string ProtoCxxUtils::cxxToProtoType(const std::string& cxx_type_name) { - // Convert from C++ to a qualified proto type. This is fairly hacky stuff, - // we're essentially reversing the conventions that the protobuf C++ - // compiler is using, e.g. replacing _ and :: with . as needed, guessing - // that a Case suffix implies some enum switching. - const std::string rel_cxx_type_name = - absl::StartsWith(cxx_type_name, "::") ? cxx_type_name.substr(2) : cxx_type_name; - std::vector frags = absl::StrSplit(rel_cxx_type_name, "::"); - // TODO(htuch): if we add some more stricter checks on mangled name usage in - // check_format.py, we should be able to eliminate this. - for (std::string& frag : frags) { - if (!frag.empty() && isupper(frag[0])) { - frag = std::regex_replace(frag, std::regex("_"), "."); - } - } - if (absl::EndsWith(frags.back(), "Case")) { - frags.pop_back(); - } - return absl::StrJoin(frags, "."); -} - -std::string ProtoCxxUtils::protoToCxxType(const std::string& proto_type_name, bool qualified, - bool enum_type) { - std::vector frags = absl::StrSplit(proto_type_name, '.'); - // We drop the enum type name, it's not needed and confuses the mangling - // when enums are nested in messages. 
- if (enum_type) { - frags.pop_back(); - } - if (qualified) { - return absl::StrJoin(frags, "::"); - } else { - return frags.back(); - } -} - -absl::optional -ProtoCxxUtils::renameMethod(absl::string_view method_name, - const absl::node_hash_map renames) { - // Simple O(N * M) match, where M is constant (the set of prefixes/suffixes) so - // should be fine. - for (const auto& field_rename : renames) { - const std::vector GeneratedMethodPrefixes = { - "clear_", "set_", "has_", "mutable_", "set_allocated_", "release_", "add_", "", - }; - // Most of the generated methods are some prefix. - for (const std::string& prefix : GeneratedMethodPrefixes) { - if (method_name == prefix + field_rename.first) { - return prefix + field_rename.second; - } - } - // _size is the only suffix. - if (method_name == field_rename.first + "_size") { - return field_rename.second + "_size"; - } - } - return {}; -} - -absl::optional -ProtoCxxUtils::renameConstant(absl::string_view constant_name, - const absl::node_hash_map renames) { - if (constant_name.size() < 2 || constant_name[0] != 'k' || !isupper(constant_name[1])) { - return {}; - } - std::vector frags; - for (const char c : constant_name.substr(1)) { - if (isupper(c)) { - frags.emplace_back(1, tolower(c)); - } else { - frags.back().push_back(c); - } - } - const std::string field_name = absl::StrJoin(frags, "_"); - const auto it = renames.find(field_name); - if (it == renames.cend()) { - return {}; - } - std::vector new_frags = absl::StrSplit(it->second, '_'); - for (auto& frag_it : new_frags) { - if (!frag_it.empty()) { - frag_it[0] = toupper(frag_it[0]); - } - } - return "k" + absl::StrJoin(new_frags, ""); -} - -absl::optional -ProtoCxxUtils::renameEnumValue(absl::string_view enum_value_name, - const absl::node_hash_map renames) { - const auto it = renames.find(std::string(enum_value_name)); - if (it == renames.cend()) { - return {}; - } - return it->second; -} - -} // namespace ApiBooster diff --git 
a/tools/clang_tools/api_booster/proto_cxx_utils.h b/tools/clang_tools/api_booster/proto_cxx_utils.h deleted file mode 100644 index 10eff61a79104..0000000000000 --- a/tools/clang_tools/api_booster/proto_cxx_utils.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include - -#include "absl/container/node_hash_map.h" -#include "absl/strings/str_join.h" -#include "absl/strings/str_split.h" -#include "absl/types/optional.h" - -namespace ApiBooster { - -// Protobuf C++ code generation hackery. This is where the utilities that map -// between C++ and protobuf types, enum constants and identifiers live. Most of -// this is heuristic and needs to match whatever the protobuf compiler does. -// TODO(htuch): investigate what can be done to make use of embedded proto -// descriptors in generated stubs to make these utils more robust. -class ProtoCxxUtils { -public: - // Convert from a C++ type, e.g. foo::bar::v2, to a protobuf type, e.g. - // foo.bar.v2. - static std::string cxxToProtoType(const std::string& cxx_type_name); - - // Given a method, e.g. mutable_foo, rele, and a map of renames in a give proto, - // determine if the method is covered by a generated C++ stub for a renamed - // field in proto, and if so, return the new method name. - static absl::optional - renameMethod(absl::string_view method_name, - const absl::node_hash_map renames); - - // Given a constant, e.g. kFooBar, determine if it needs upgrading. We need - // this for synthesized oneof cases. - static absl::optional - renameConstant(absl::string_view constant_name, - const absl::node_hash_map renames); - - // Given an enum value, e.g. FOO_BAR determine if it needs upgrading. - static absl::optional - renameEnumValue(absl::string_view enum_value_name, - const absl::node_hash_map renames); - - // Convert from a protobuf type, e.g. foo.bar.v2, to a C++ type, e.g. - // foo::bar::v2. 
- static std::string protoToCxxType(const std::string& proto_type_name, bool qualified, - bool enum_type); -}; - -} // namespace ApiBooster diff --git a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc b/tools/clang_tools/api_booster/proto_cxx_utils_test.cc deleted file mode 100644 index 2a06413bd4d25..0000000000000 --- a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc +++ /dev/null @@ -1,72 +0,0 @@ -#include "gtest/gtest.h" -#include "proto_cxx_utils.h" - -namespace ApiBooster { -namespace { - -// Validate C++ to proto type name conversion. -TEST(ProtoCxxUtils, CxxToProtoType) { - EXPECT_EQ("", ProtoCxxUtils::cxxToProtoType("")); - EXPECT_EQ("foo", ProtoCxxUtils::cxxToProtoType("foo")); - EXPECT_EQ("foo.bar", ProtoCxxUtils::cxxToProtoType("foo::bar")); - EXPECT_EQ("foo.bar", ProtoCxxUtils::cxxToProtoType("foo::bar::FooCase")); - EXPECT_EQ("foo.bar.Baz.Blah", ProtoCxxUtils::cxxToProtoType("foo::bar::Baz_Blah")); -} - -// Validate proto to C++ type name conversion. -TEST(ProtoCxxUtils, ProtoToCxxType) { - EXPECT_EQ("", ProtoCxxUtils::protoToCxxType("", false, false)); - EXPECT_EQ("", ProtoCxxUtils::protoToCxxType("", true, false)); - EXPECT_EQ("foo", ProtoCxxUtils::protoToCxxType("foo", false, false)); - EXPECT_EQ("foo", ProtoCxxUtils::protoToCxxType("foo", true, false)); - EXPECT_EQ("bar", ProtoCxxUtils::protoToCxxType("foo.bar", false, false)); - EXPECT_EQ("foo::bar", ProtoCxxUtils::protoToCxxType("foo.bar", true, false)); - EXPECT_EQ("foo::Bar", ProtoCxxUtils::protoToCxxType("foo.Bar", true, false)); - EXPECT_EQ("foo", ProtoCxxUtils::protoToCxxType("foo.Bar", true, true)); - EXPECT_EQ("foo::Bar::Baz", ProtoCxxUtils::protoToCxxType("foo.Bar.Baz", true, false)); - EXPECT_EQ("foo::Bar::Baz::Blah", ProtoCxxUtils::protoToCxxType("foo.Bar.Baz.Blah", true, false)); - EXPECT_EQ("foo::Bar::Baz", ProtoCxxUtils::protoToCxxType("foo.Bar.Baz.Blah", true, true)); -} - -// Validate proto field accessor upgrades. 
-TEST(ProtoCxxUtils, RenameMethod) { - const absl::node_hash_map renames = { - {"foo", "bar"}, - {"bar", "baz"}, - }; - EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameMethod("whatevs", renames)); - EXPECT_EQ("bar", ProtoCxxUtils::renameMethod("foo", renames)); - EXPECT_EQ("baz", ProtoCxxUtils::renameMethod("bar", renames)); - - EXPECT_EQ("clear_bar", ProtoCxxUtils::renameMethod("clear_foo", renames)); - EXPECT_EQ("set_bar", ProtoCxxUtils::renameMethod("set_foo", renames)); - EXPECT_EQ("has_bar", ProtoCxxUtils::renameMethod("has_foo", renames)); - EXPECT_EQ("mutable_bar", ProtoCxxUtils::renameMethod("mutable_foo", renames)); - EXPECT_EQ("set_allocated_bar", ProtoCxxUtils::renameMethod("set_allocated_foo", renames)); - EXPECT_EQ("release_bar", ProtoCxxUtils::renameMethod("release_foo", renames)); - EXPECT_EQ("add_bar", ProtoCxxUtils::renameMethod("add_foo", renames)); - EXPECT_EQ("bar_size", ProtoCxxUtils::renameMethod("foo_size", renames)); -} - -// Validate proto constant upgrades. -TEST(ProtoCxxUtils, RenameConstant) { - const absl::node_hash_map renames = { - {"foo_bar", "bar_foo"}, - {"foo_baz", "baz"}, - }; - EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameConstant("whatevs", renames)); - EXPECT_EQ("kBarFoo", ProtoCxxUtils::renameConstant("kFooBar", renames)); - EXPECT_EQ("kBaz", ProtoCxxUtils::renameConstant("kFooBaz", renames)); -} - -// Validate proto enum value upgrades. 
-TEST(ProtoCxxUtils, RenameEnumValue) { - const absl::node_hash_map renames = { - {"FOO_BAR", "BAR_FOO"}, - }; - EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameEnumValue("FOO_BAZ", renames)); - EXPECT_EQ("BAR_FOO", ProtoCxxUtils::renameEnumValue("FOO_BAR", renames)); -} - -} // namespace -} // namespace ApiBooster diff --git a/tools/code_format/BUILD b/tools/code_format/BUILD index ba9de5fce8557..11416d9ca8415 100644 --- a/tools/code_format/BUILD +++ b/tools/code_format/BUILD @@ -1,6 +1,6 @@ load("@base_pip3//:requirements.bzl", "requirement") +load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -12,14 +12,11 @@ exports_files([ "envoy_build_fixer.py", ]) -envoy_py_binary( - name = "tools.code_format.python_check", +py_binary( + name = "python_check", + srcs = ["python_check.py"], deps = [ - "//tools/base:aio", - "//tools/base:checker", - "//tools/base:utils", - requirement("flake8"), - requirement("pep8-naming"), - requirement("yapf"), + "@envoy_repo", + requirement("envoy.code_format.python_check"), ], ) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index fa88387510e19..d01897fc338a0 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -151,8 +151,6 @@ # Please DO NOT extend this allow list without consulting # @envoyproxy/dependency-shepherds. 
BUILD_URLS_ALLOWLIST = ( - "./generated_api_shadow/bazel/repository_locations.bzl", - "./generated_api_shadow/bazel/envoy_http_archive.bzl", "./bazel/repository_locations.bzl", "./bazel/external/cargo/crates.bzl", "./api/bazel/repository_locations.bzl", @@ -282,7 +280,6 @@ def __init__(self, args): self.operation_type = args.operation_type self.target_path = args.target_path self.api_prefix = args.api_prefix - self.api_shadow_root = args.api_shadow_prefix self.envoy_build_rule_check = not args.skip_envoy_build_rule_check self.namespace_check = args.namespace_check self.namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ @@ -486,7 +483,7 @@ def allow_listed_for_build_urls(self, file_path): return file_path in BUILD_URLS_ALLOWLIST def is_api_file(self, file_path): - return file_path.startswith(self.api_prefix) or file_path.startswith(self.api_shadow_root) + return file_path.startswith(self.api_prefix) def is_build_file(self, file_path): basename = os.path.basename(file_path) @@ -869,7 +866,7 @@ def check_source_line(self, line, file_path, report_error): + "https://github.com/LuaJIT/LuaJIT/issues/450#issuecomment-433659873 for details.") if file_path.endswith(PROTO_SUFFIX): - exclude_path = ['v1', 'v2', 'generated_api_shadow'] + exclude_path = ['v1', 'v2'] result = PROTO_VALIDATION_STRING.search(line) if result is not None: if not any(x in file_path for x in exclude_path): @@ -926,8 +923,7 @@ def check_build_path(self, file_path): error_messages += self.execute_command( command, "envoy_build_fixer check failed", file_path) - if self.is_build_file(file_path) and (file_path.startswith(self.api_prefix + "envoy") or - file_path.startswith(self.api_shadow_root + "envoy")): + if self.is_build_file(file_path) and file_path.startswith(self.api_prefix + "envoy"): found = False for line in self.read_lines(file_path): if "api_proto_package(" in line: @@ -1053,21 +1049,6 @@ def check_owners(self, dir_name, owned_directories, error_messages): 
error_messages.append( "New directory %s appears to not have owners in CODEOWNERS" % dir_name) - def check_api_shadow_starlark_files(self, file_path, error_messages): - command = "diff -u " - command += file_path + " " - api_shadow_starlark_path = self.api_shadow_root + re.sub(r"\./api/", '', file_path) - command += api_shadow_starlark_path - - error_message = self.execute_command( - command, "invalid .bzl in generated_api_shadow", file_path) - if self.operation_type == "check": - error_messages += error_message - elif self.operation_type == "fix" and len(error_message) != 0: - shutil.copy(file_path, api_shadow_starlark_path) - - return error_messages - def check_format_visitor(self, arg, dir_name, names): """Run check_format in parallel for the given files. Args: @@ -1103,11 +1084,6 @@ def check_format_visitor(self, arg, dir_name, names): self.check_owners(str(top_level), owned_directories, error_messages) for file_name in names: - if dir_name.startswith("./api") and self.is_starlark_file(file_name): - result = pool.apply_async( - self.check_api_shadow_starlark_files, - args=(dir_name + "/" + file_name, error_messages)) - result_list.append(result) result = pool.apply_async( self.check_format_return_trace_on_error, args=(dir_name + "/" + file_name,)) result_list.append(result) @@ -1147,11 +1123,6 @@ def whitelisted_for_memcpy(self, file_path): default=multiprocessing.cpu_count(), help="number of worker processes to use; defaults to one per core.") parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.") - parser.add_argument( - "--api-shadow-prefix", - type=str, - default="./generated_api_shadow/", - help="path of the shadow API tree.") parser.add_argument( "--skip_envoy_build_rule_check", action="store_true", diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index e3a00f45b0cb5..135b7e9fd3ffe 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -4,135 +4,51 
@@ # # with bazel: # -# bazel run //tools/code_format:python_check -- -h +# $ bazel run //tools/code_format:python_check -- -h # -# alternatively, if you have the necessary python deps available +# $ bazel run //tools/code_format:python_check # -# PYTHONPATH=. ./tools/code_format/python_check.py -h +# with pip: # -# python requires: flake8, yapf +# $ pip install envoy.code_format.python_check +# $ envoy.code_format.python_check -h +# +# usage with pip requires a path, eg +# +# $ envoy.code_format.python_check . +# +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.code_format.python_check +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling # -import argparse import pathlib import sys from functools import cached_property -from typing import Iterable, List, Optional, Tuple -from flake8.main.application import Application as Flake8Application # type:ignore +import abstracts -import yapf # type:ignore +from envoy.code_format import python_check -from tools.base import aio, checker, utils +import envoy_repo -FLAKE8_CONFIG = '.flake8' -YAPF_CONFIG = '.style.yapf' -# TODO(phlax): add checks for: -# - isort - - -class PythonChecker(checker.AsyncChecker): - checks = ("flake8", "yapf") - - @property - def diff_file_path(self) -> Optional[pathlib.Path]: - return pathlib.Path(self.args.diff_file) if self.args.diff_file else None +@abstracts.implementer(python_check.APythonChecker) +class EnvoyPythonChecker: @cached_property - def flake8_app(self) -> Flake8Application: - flake8_app = Flake8Application() - flake8_app.initialize(self.flake8_args) - return flake8_app - - @property - def flake8_args(self) -> Tuple[str, ...]: - return ("--config", str(self.flake8_config_path), str(self.path)) - - @property - def flake8_config_path(self) -> pathlib.Path: - return self.path.joinpath(FLAKE8_CONFIG) - - @property - def recurse(self) -> bool: - """Flag to determine whether to apply checks 
recursively""" - return self.args.recurse - - @property - def yapf_config_path(self) -> pathlib.Path: - return self.path.joinpath(YAPF_CONFIG) - - @property - def yapf_files(self) -> List[str]: - return yapf.file_resources.GetCommandLineFiles( - self.args.paths, - recursive=self.recurse, - exclude=yapf.file_resources.GetExcludePatternsForDir(str(self.path))) - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - super().add_arguments(parser) - parser.add_argument( - "--recurse", - "-r", - choices=["yes", "no"], - default="yes", - help="Recurse path or paths directories") - parser.add_argument( - "--diff-file", default=None, help="Specify the path to a diff file with fixes") - - async def check_flake8(self) -> None: - """Run flake8 on files and/or repo""" - errors: List[str] = [] - with utils.buffered(stdout=errors, mangle=self._strip_lines): - self.flake8_app.run_checks() - self.flake8_app.report() - if errors: - self.error("flake8", errors) - - async def check_yapf(self) -> None: - """Run flake8 on files and/or repo""" - futures = aio.concurrent(self.yapf_format(python_file) for python_file in self.yapf_files) - - async for (python_file, (reformatted, encoding, changed)) in futures: - self.yapf_result(python_file, reformatted, changed) - - async def on_check_run(self, check: str) -> None: - if check not in self.failed and check not in self.warned: - self.succeed(check, [check]) - - async def on_checks_complete(self) -> int: - if self.diff_file_path and self.has_failed: - result = await aio.async_subprocess.run(["git", "diff", "HEAD"], - cwd=self.path, - capture_output=True) - self.diff_file_path.write_bytes(result.stdout) - return await super().on_checks_complete() - - async def yapf_format(self, python_file: str) -> tuple: - return python_file, yapf.yapf_api.FormatFile( - python_file, - style_config=str(self.yapf_config_path), - in_place=self.fix, - print_diff=not self.fix) - - def yapf_result(self, python_file: str, reformatted: str, changed: 
bool) -> None: - if not changed: - return self.succeed("yapf", [python_file]) - if self.fix: - return self.warn("yapf", [f"{python_file}: reformatted"]) - if reformatted: - return self.warn("yapf", [f"{python_file}: diff\n{reformatted}"]) - self.error("yapf", [python_file]) - - def _strip_line(self, line: str) -> str: - return line[len(str(self.path)) + 1:] if line.startswith(f"{self.path}/") else line - - def _strip_lines(self, lines: Iterable[str]) -> List[str]: - return [self._strip_line(line) for line in lines if line] + def path(self) -> pathlib.Path: + if self.args.paths: + return pathlib.Path(self.args.paths[0]) + return pathlib.Path(envoy_repo.PATH) -def main(*args: str) -> Optional[int]: - return PythonChecker(*args).run() +def main(*args) -> int: + return EnvoyPythonChecker(*args).run() if __name__ == "__main__": diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py deleted file mode 100644 index 7cf39577d4bb7..0000000000000 --- a/tools/code_format/tests/test_python_check.py +++ /dev/null @@ -1,384 +0,0 @@ -import types -from contextlib import contextmanager -from unittest.mock import AsyncMock, patch, MagicMock, PropertyMock - -import pytest - -from tools.code_format import python_check - - -def test_python_checker_constructor(): - checker = python_check.PythonChecker("path1", "path2", "path3") - assert checker.checks == ("flake8", "yapf") - assert checker.args.paths == ['path1', 'path2', 'path3'] - - -@pytest.mark.parametrize("diff_path", ["", None, "PATH"]) -def test_python_diff_path(patches, diff_path): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "pathlib", - ("PythonChecker.args", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_plib, m_args): - m_args.return_value.diff_file = diff_path - assert checker.diff_file_path == (m_plib.Path.return_value if diff_path else None) - - if diff_path: - assert ( - 
list(m_plib.Path.call_args) - == [(m_args.return_value.diff_file, ), {}]) - else: - assert not m_plib.Path.called - - -def test_python_flake8_app(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.flake8_args", dict(new_callable=PropertyMock)), - "Flake8Application", - prefix="tools.code_format.python_check") - - with patched as (m_flake8_args, m_flake8_app): - assert checker.flake8_app == m_flake8_app.return_value - - assert ( - list(m_flake8_app.call_args) - == [(), {}]) - assert ( - list(m_flake8_app.return_value.initialize.call_args) - == [(m_flake8_args.return_value,), {}]) - - -def test_python_flake8_args(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.flake8_config_path", dict(new_callable=PropertyMock)), - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_flake8_config, m_path): - assert ( - checker.flake8_args - == ('--config', - str(m_flake8_config.return_value), - str(m_path.return_value))) - - -def test_python_flake8_config_path(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_path, ): - assert checker.flake8_config_path == m_path.return_value.joinpath.return_value - - assert ( - list(m_path.return_value.joinpath.call_args) - == [(python_check.FLAKE8_CONFIG, ), {}]) - - -def test_python_yapf_config_path(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_path, ): - assert checker.yapf_config_path == m_path.return_value.joinpath.return_value - - assert ( - list(m_path.return_value.joinpath.call_args) - == 
[(python_check.YAPF_CONFIG, ), {}]) - - -def test_python_yapf_files(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - - patched = patches( - ("PythonChecker.args", dict(new_callable=PropertyMock)), - ("PythonChecker.path", dict(new_callable=PropertyMock)), - "yapf.file_resources.GetCommandLineFiles", - "yapf.file_resources.GetExcludePatternsForDir", - prefix="tools.code_format.python_check") - - with patched as (m_args, m_path, m_yapf_files, m_yapf_exclude): - assert checker.yapf_files == m_yapf_files.return_value - - assert ( - list(m_yapf_files.call_args) - == [(m_args.return_value.paths,), - {'recursive': m_args.return_value.recurse, - 'exclude': m_yapf_exclude.return_value}]) - assert ( - list(m_yapf_exclude.call_args) - == [(str(m_path.return_value),), {}]) - - -def test_python_add_arguments(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - add_mock = patch("tools.code_format.python_check.checker.AsyncChecker.add_arguments") - m_parser = MagicMock() - - with add_mock as m_add: - checker.add_arguments(m_parser) - - assert ( - list(m_add.call_args) - == [(m_parser,), {}]) - assert ( - list(list(c) for c in m_parser.add_argument.call_args_list) - == [[('--recurse', '-r'), - {'choices': ['yes', 'no'], - 'default': 'yes', - 'help': 'Recurse path or paths directories'}], - [('--diff-file',), - {'default': None, 'help': 'Specify the path to a diff file with fixes'}]]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) -async def test_python_check_flake8(patches, errors): - checker = python_check.PythonChecker("path1", "path2", "path3") - - patched = patches( - "utils.buffered", - "PythonChecker.error", - "PythonChecker._strip_lines", - ("PythonChecker.flake8_app", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - @contextmanager - def mock_buffered(stdout=None, mangle=None): - yield - stdout.extend(errors) - - with patched as (m_buffered, m_error, 
m_mangle, m_flake8_app): - m_buffered.side_effect = mock_buffered - assert not await checker.check_flake8() - - assert ( - list(m_buffered.call_args) - == [(), {'stdout': errors, 'mangle': m_mangle}]) - assert ( - list(m_flake8_app.return_value.run_checks.call_args) - == [(), {}]) - assert ( - list(m_flake8_app.return_value.report.call_args) - == [(), {}]) - - if errors: - assert ( - list(m_error.call_args) - == [('flake8', ['err1', 'err2']), {}]) - else: - assert not m_error.called - - -def test_python_check_recurse(): - checker = python_check.PythonChecker("path1", "path2", "path3") - args_mock = patch( - "tools.code_format.python_check.PythonChecker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - assert checker.recurse == m_args.return_value.recurse - assert "recurse" not in checker.__dict__ - - -@pytest.mark.asyncio -async def test_python_check_yapf(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "aio", - ("PythonChecker.yapf_format", dict(new_callable=MagicMock)), - "PythonChecker.yapf_result", - ("PythonChecker.yapf_files", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - files = ["file1", "file2", "file3"] - - async def concurrent(iters): - assert isinstance(iters, types.GeneratorType) - for i, format_result in enumerate(iters): - yield (format_result, (f"REFORMAT{i}", f"ENCODING{i}", f"CHANGED{i}")) - - with patched as (m_aio, m_yapf_format, m_yapf_result, m_yapf_files): - m_yapf_files.return_value = files - m_aio.concurrent.side_effect = concurrent - assert not await checker.check_yapf() - - assert ( - list(list(c) for c in m_yapf_format.call_args_list) - == [[(file,), {}] for file in files]) - assert ( - list(list(c) for c in m_yapf_result.call_args_list) - == [[(m_yapf_format.return_value, f"REFORMAT{i}", f"CHANGED{i}"), {}] for i, _ in enumerate(files)]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("errors", [[], ["check2", "check3"], ["check1", 
"check3"]]) -@pytest.mark.parametrize("warnings", [[], ["check4", "check5"], ["check1", "check5"]]) -async def test_python_on_check_run(patches, errors, warnings): - checker = python_check.PythonChecker("path1", "path2", "path3") - checkname = "check1" - patched = patches( - "PythonChecker.succeed", - ("PythonChecker.name", dict(new_callable=PropertyMock)), - ("PythonChecker.failed", dict(new_callable=PropertyMock)), - ("PythonChecker.warned", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_succeed, m_name, m_failed, m_warned): - m_failed.return_value = errors - m_warned.return_value = warnings - assert not await checker.on_check_run(checkname) - - if checkname in warnings or checkname in errors: - assert not m_succeed.called - else: - assert ( - list(m_succeed.call_args) - == [(checkname, [checkname]), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("diff_path", ["", "DIFF1"]) -@pytest.mark.parametrize("failed", [True, False]) -async def test_python_on_checks_complete(patches, diff_path, failed): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "aio", - ("checker.AsyncChecker.on_checks_complete", dict(new_callable=AsyncMock)), - ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), - ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_aio, m_super, m_diff, m_failed, m_path): - m_aio.async_subprocess.run = AsyncMock() - if not diff_path: - m_diff.return_value = None - m_failed.return_value = failed - assert await checker.on_checks_complete() == m_super.return_value - - if diff_path and failed: - assert ( - list(m_aio.async_subprocess.run.call_args) - == [(['git', 'diff', 'HEAD'],), - dict(capture_output=True, cwd=m_path.return_value)]) - assert ( - list(m_diff.return_value.write_bytes.call_args) - == 
[(m_aio.async_subprocess.run.return_value.stdout,), {}]) - else: - assert not m_aio.async_subprocess.run.called - - assert ( - list(m_super.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("fix", [True, False]) -async def test_python_yapf_format(patches, fix): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "yapf.yapf_api.FormatFile", - ("PythonChecker.yapf_config_path", dict(new_callable=PropertyMock)), - ("PythonChecker.fix", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_format, m_config, m_fix): - m_fix.return_value = fix - assert await checker.yapf_format("FILENAME") == ("FILENAME", m_format.return_value) - - assert ( - list(m_format.call_args) - == [('FILENAME',), - {'style_config': str(m_config.return_value), - 'in_place': fix, - 'print_diff': not fix}]) - assert ( - list(list(c) for c in m_fix.call_args_list) - == [[(), {}], [(), {}]]) - - -@pytest.mark.parametrize("reformatted", ["", "REFORMAT"]) -@pytest.mark.parametrize("fix", [True, False]) -@pytest.mark.parametrize("changed", [True, False]) -def test_python_yapf_result(patches, reformatted, fix, changed): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "PythonChecker.succeed", - "PythonChecker.warn", - "PythonChecker.error", - ("PythonChecker.fix", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_succeed, m_warn, m_error, m_fix): - m_fix.return_value = fix - checker.yapf_result("FILENAME", reformatted, changed) - - if not changed: - assert ( - list(m_succeed.call_args) - == [('yapf', ['FILENAME']), {}]) - assert not m_warn.called - assert not m_error.called - assert not m_fix.called - return - assert not m_succeed.called - if fix: - assert not m_error.called - assert len(m_warn.call_args_list) == 1 - assert ( - list(m_warn.call_args) - == [('yapf', [f'FILENAME: reformatted']), {}]) - return 
- if reformatted: - assert not m_error.called - assert len(m_warn.call_args_list) == 1 - assert ( - list(m_warn.call_args) - == [('yapf', [f'FILENAME: diff\n{reformatted}']), {}]) - return - assert not m_warn.called - assert ( - list(m_error.call_args) - == [('yapf', ['FILENAME']), {}]) - - -def test_python_strip_lines(): - checker = python_check.PythonChecker("path1", "path2", "path3") - strip_mock = patch("tools.code_format.python_check.PythonChecker._strip_line") - lines = ["", "foo", "", "bar", "", "", "baz", "", ""] - - with strip_mock as m_strip: - assert ( - checker._strip_lines(lines) - == [m_strip.return_value] * 3) - - assert ( - list(list(c) for c in m_strip.call_args_list) - == [[('foo',), {}], [('bar',), {}], [('baz',), {}]]) - - -@pytest.mark.parametrize("line", ["REMOVE/foo", "REMOVE", "bar", "other", "REMOVE/baz", "baz"]) -def test_python_strip_line(line): - checker = python_check.PythonChecker("path1", "path2", "path3") - path_mock = patch( - "tools.code_format.python_check.PythonChecker.path", - new_callable=PropertyMock) - - with path_mock as m_path: - m_path.return_value = "REMOVE" - assert ( - checker._strip_line(line) - == line[7:] if line.startswith(f"REMOVE/") else line) - - -def test_python_checker_main(command_main): - command_main( - python_check.main, - "tools.code_format.python_check.PythonChecker") diff --git a/tools/dependency/BUILD b/tools/dependency/BUILD index f8945ed0136cc..b1e0af18c2aea 100644 --- a/tools/dependency/BUILD +++ b/tools/dependency/BUILD @@ -1,6 +1,6 @@ load("@rules_python//python:defs.bzl", "py_binary", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_binary") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -11,8 +11,8 @@ py_library( srcs = ["exports.py"], data = [ "//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations.bzl", - 
"@envoy_api_canonical//bazel:repository_locations_utils.bzl", + "@envoy_api//bazel:repository_locations.bzl", + "@envoy_api//bazel:repository_locations_utils.bzl", ], ) @@ -41,10 +41,11 @@ py_binary( ], ) -envoy_py_binary( - name = "tools.dependency.pip_check", +py_binary( + name = "pip_check", + srcs = ["pip_check.py"], deps = [ - "//tools/base:checker", - "//tools/base:utils", + "@envoy_repo", + requirement("envoy.dependency.pip_check"), ], ) diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py index 5cdee9a90af33..e1d048ed93dce 100755 --- a/tools/dependency/cve_scan.py +++ b/tools/dependency/cve_scan.py @@ -75,6 +75,11 @@ 'CVE-2021-22931', 'CVE-2021-22939', 'CVE-2021-22940', + # This cve only affects versions of kafka < 2.8.1, but scanner + # does not support version matching atm. + # Tracking issue to fix versioning: + # https://github.com/envoyproxy/envoy/issues/18354 + 'CVE-2021-38153', ]) # Subset of CVE fields that are useful below. diff --git a/tools/dependency/exports.py b/tools/dependency/exports.py index ed365b8a91d74..d20e7b4c79125 100644 --- a/tools/dependency/exports.py +++ b/tools/dependency/exports.py @@ -17,7 +17,7 @@ def load_module(name, path): # this is the relative path in a bazel build # to call this module outside of a bazel build set the `API_PATH` first, # for example, if running from the envoy repo root: `export API_PATH=api/` -api_path = os.getenv("API_PATH", "external/envoy_api_canonical") +api_path = os.getenv("API_PATH", "external/envoy_api") # Modules envoy_repository_locations = load_module( diff --git a/tools/dependency/pip_check.py b/tools/dependency/pip_check.py index 91a8456fc2854..4da2ac5f8fb4e 100755 --- a/tools/dependency/pip_check.py +++ b/tools/dependency/pip_check.py @@ -4,96 +4,51 @@ # # with bazel: # -# bazel //tools/dependency:pip_check -- -h +# $ bazel run //tools/dependency:pip_check -- -h # -# alternatively, if you have the necessary python deps available +# $ bazel run 
//tools/dependency:pip_check # -# ./tools/dependency/pip_check.py -h +# with pip: +# +# $ pip install envoy.dependency.pip_check +# $ envoy.dependency.pip_check -h +# +# usage with pip requires a path, eg +# +# $ envoy.dependency.pip_check . +# +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.dependency.pip_check +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling # +import pathlib import sys from functools import cached_property -from typing import Iterable, Set - -from tools.base import checker, utils - -DEPENDABOT_CONFIG = ".github/dependabot.yml" -REQUIREMENTS_FILENAME = "requirements.txt" -# TODO(phlax): add checks for: -# - requirements can be installed together -# - pip-compile formatting +import abstracts +from envoy.dependency import pip_check -class PipConfigurationError(Exception): - pass +import envoy_repo -class PipChecker(checker.Checker): - checks = ("dependabot",) - _dependabot_config = DEPENDABOT_CONFIG - _requirements_filename = REQUIREMENTS_FILENAME +@abstracts.implementer(pip_check.APipChecker) +class EnvoyPipChecker: @cached_property - def config_requirements(self) -> set: - """Set of configured pip dependabot directories""" - return set( - update['directory'] - for update in self.dependabot_config["updates"] - if update["package-ecosystem"] == "pip") - - @cached_property - def dependabot_config(self) -> dict: - """Parsed dependabot config""" - result = utils.from_yaml(self.path.joinpath(self.dependabot_config_path)) - if not isinstance(result, dict): - raise PipConfigurationError( - f"Unable to parse dependabot config: {self.dependabot_config_path}") - return result - - @property - def dependabot_config_path(self) -> str: - return self._dependabot_config - - @cached_property - def requirements_dirs(self) -> Set[str]: - """Set of found directories in the repo containing requirements.txt""" - return set( - f"/{f.parent.relative_to(self.path)}" 
for f in self.path.glob("**/*") - if f.name == self.requirements_filename) - - @property - def requirements_filename(self) -> str: - return self._requirements_filename - - def check_dependabot(self) -> None: - """Check that dependabot config matches requirements.txt files found in repo""" - missing_dirs = self.config_requirements.difference(self.requirements_dirs) - missing_config = self.requirements_dirs.difference(self.config_requirements) - correct = self.requirements_dirs.intersection(self.config_requirements) - if correct: - self.dependabot_success(correct) - if missing_dirs: - self.dependabot_errors( - missing_dirs, - f"Missing {self.requirements_filename} dir, specified in dependabot config") - if missing_config: - self.dependabot_errors( - missing_config, - f"Missing dependabot config for {self.requirements_filename} in dir") - - def dependabot_success(self, correct: Iterable) -> None: - self.succeed( - "dependabot", - ([f"{self.requirements_filename}: {dirname}" for dirname in sorted(correct)])) - - def dependabot_errors(self, missing: Iterable, msg: str) -> None: - for dirname in sorted(missing): - self.error("dependabot", [f"{msg}: {dirname}"]) + def path(self) -> pathlib.Path: + if self.args.paths: + return pathlib.Path(self.args.paths[0]) + return pathlib.Path(envoy_repo.PATH) def main(*args) -> int: - return PipChecker(*args).run() + return EnvoyPipChecker(*args).run() if __name__ == "__main__": diff --git a/tools/dependency/release_dates.py b/tools/dependency/release_dates.py index 10733549ae779..aed1021071898 100644 --- a/tools/dependency/release_dates.py +++ b/tools/dependency/release_dates.py @@ -13,34 +13,56 @@ import os import sys +import argparse +import string + +import pytz import github import exports import utils - from colorama import Fore, Style from packaging import version +# Tag issues created with these labels. 
+LABELS = ['dependencies', 'area/build', 'no stalebot'] +GITHUB_REPO_LOCATION = "envoyproxy/envoy" + +BODY_TPL = """ +Package Name: ${dep} +Current Version: ${metadata_version}@${release_date} +Available Version: ${tag_name}@${created_at} +Upstream releases: https://github.com/${package_name}/releases +""" + +CLOSING_TPL = """ +New version is available for this package +New Version: ${tag_name}@${created_at} +Upstream releases: https://github.com/${full_name}/releases +New Issue Link: https://github.com/${repo_location}/issues/${number} +""" + # Thrown on errors related to release date or version. class ReleaseDateVersionError(Exception): pass +# Errors that happen during issue creation. +class DependencyUpdateError(Exception): + pass + + # Format a datetime object as UTC YYYY-MM-DD. def format_utc_date(date): - # We only handle naive datetime objects right now, which is what PyGithub - # appears to be handing us. - if date.tzinfo is not None: - raise ReleaseDateVersionError( - "Expected UTC date without timezone information. Received timezone information") + date = date.replace(tzinfo=pytz.UTC) return date.date().isoformat() # Obtain latest release version and compare against metadata version, warn on # mismatch. 
-def verify_and_print_latest_release(dep, repo, metadata_version, release_date): +def verify_and_print_latest_release(dep, repo, metadata_version, release_date, create_issue=False): try: latest_release = repo.get_latest_release() except github.GithubException as err: @@ -51,6 +73,122 @@ def verify_and_print_latest_release(dep, repo, metadata_version, release_date): print( f'{Fore.YELLOW}*WARNING* {dep} has a newer release than {metadata_version}@<{release_date}>: ' f'{latest_release.tag_name}@<{latest_release.created_at}>{Style.RESET_ALL}') + # check for --check_deps flag, To run this only on github action schedule + # and it does not bloat CI on every push + if create_issue: + create_issues(dep, repo, metadata_version, release_date, latest_release) + + +def is_sha(text): + if len(text) != 40: + return False + try: + int(text, 16) + except ValueError: + return False + return True + + +# create issue for stale dependency +def create_issues(dep, package_repo, metadata_version, release_date, latest_release): + """Create issues in GitHub. + + Args: + dep : name of the deps + package_repo: package Url + metadata_version: current version information + release_date : old release_date + latest_release : latest_release (name and date ) + """ + access_token = os.getenv('GITHUB_TOKEN') + git = github.Github(access_token) + repo = git.get_repo(GITHUB_REPO_LOCATION) + # Find GitHub label objects for LABELS. 
+ labels = [] + for label in repo.get_labels(): + if label.name in LABELS: + labels.append(label.name) + if len(labels) != len(LABELS): + raise DependencyUpdateError('Unknown labels (expected %s, got %s)' % (LABELS, labels)) + # trunctate metadata_version to 7 char if its sha_hash + if is_sha(metadata_version): + metadata_version = metadata_version[0:7] + title = f'Newer release available `{dep}`: {latest_release.tag_name} (current: {metadata_version})' + # search for old package opened issue and close them + body = string.Template(BODY_TPL).substitute( + dep=dep, + metadata_version=metadata_version, + release_date=release_date, + tag_name=latest_release.tag_name, + created_at=latest_release.created_at, + package_name=package_repo.full_name) + if issues_exist(title, git): + print("Issue with %s already exists" % title) + print(' >> Issue already exists, not posting!') + return + print('Creating issues...') + try: + issue_created = repo.create_issue(title, body=body, labels=LABELS) + latest_release.latest_issue_number = issue_created.number + except github.GithubException as e: + print(f'Unable to create issue, received error: {e}') + raise + search_old_version_open_issue_exist(title, git, package_repo, latest_release) + + +# checks if issue exist +def issues_exist(title, git): + # search for common title + title_search = title[0:title.index("(") - 1] + query = f'repo:{GITHUB_REPO_LOCATION} {title_search} in:title' + try: + issues = git.search_issues(query) + except github.GithubException as e: + print(f'There is a problem looking for issue title: {title}, received {e}') + raise + return issues.totalCount > 0 + + +# search for issue by title and delete old issue if new package version is available +def search_old_version_open_issue_exist(title, git, package_repo, latest_release): + # search for only "Newer release available {dep}:" as will be common in dep issue + title_search = title[0:title.index(":")] + query = f'repo:{GITHUB_REPO_LOCATION} {title_search} 
in:title is:open' + # there might be more than one issue + # if current package version == issue package version no need to do anything, right issue is open + # if current package version != issue_title_version means a newer updated version is available + # and close old issue + issues = git.search_issues(query) + for issue in issues: + issue_version = get_package_version_from_issue(issue.title) + if issue_version != latest_release.tag_name: + close_old_issue(git, issue.number, latest_release, package_repo) + + +def get_package_version_from_issue(issue_title): + # issue title create by github action has two form + return issue_title.split(":")[1].split("(")[0].strip() + + +def close_old_issue(git, issue_number, latest_release, package_repo): + repo = git.get_repo(GITHUB_REPO_LOCATION) + closing_comment = string.Template(CLOSING_TPL) + try: + issue = repo.get_issue(number=issue_number) + print(f'Publishing closing comment... ') + issue.create_comment( + closing_comment.substitute( + tag_name=latest_release.tag_name, + created_at=latest_release.created_at, + full_name=package_repo.full_name, + repo_location=GITHUB_REPO_LOCATION, + number=latest_release.latest_issue_number)) + print(f'Closing this issue as new package is available') + issue.edit(state='closed') + except github.GithubException as e: + print(f'There was a problem in publishing comment or closing this issue {e}') + raise + return # Print GitHub release date, throw ReleaseDateVersionError on mismatch with metadata release date. @@ -106,7 +244,7 @@ def get_untagged_release_date(repo, metadata_version, github_release): # Verify release dates in metadata against GitHub API. -def verify_and_print_release_dates(repository_locations, github_instance): +def verify_and_print_release_dates(repository_locations, github_instance, create_issue=False): for dep, metadata in sorted(repository_locations.items()): release_date = None # Obtain release information from GitHub API. 
@@ -122,7 +260,8 @@ def verify_and_print_release_dates(repository_locations, github_instance): release_date = get_untagged_release_date(repo, metadata['version'], github_release) if release_date: # Check whether there is a more recent version and warn if necessary. - verify_and_print_latest_release(dep, repo, github_release.version, release_date) + verify_and_print_latest_release( + dep, repo, github_release.version, release_date, create_issue) # Verify that the release date in metadata and GitHub correspond, # otherwise throw ReleaseDateVersionError. verify_and_print_release_date(dep, release_date, metadata['release_date']) @@ -132,19 +271,23 @@ def verify_and_print_release_dates(repository_locations, github_instance): if __name__ == '__main__': - if len(sys.argv) != 2: - print('Usage: %s ' % sys.argv[0]) - sys.exit(1) + # parsing location and github_action flag with argparse + parser = argparse.ArgumentParser() + parser.add_argument('location', type=str) + parser.add_argument('--create_issues', action='store_true') + args = parser.parse_args() access_token = os.getenv('GITHUB_TOKEN') if not access_token: print('Missing GITHUB_TOKEN') sys.exit(1) - path = sys.argv[1] + path = args.location + create_issue = args.create_issues spec_loader = exports.repository_locations_utils.load_repository_locations_spec path_module = exports.load_module('repository_locations', path) try: verify_and_print_release_dates( - spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC), github.Github(access_token)) + spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC), github.Github(access_token), + create_issue) except ReleaseDateVersionError as e: print( f'{Fore.RED}An error occurred while processing {path}, please verify the correctness of the ' diff --git a/tools/dependency/release_dates.sh b/tools/dependency/release_dates.sh index de12f53e4512b..51b48039a3ce5 100755 --- a/tools/dependency/release_dates.sh +++ b/tools/dependency/release_dates.sh @@ -7,4 +7,4 @@ set -e # TODO(phlax): move this 
job to bazel and remove this export API_PATH=api/ -PYTHONPATH=. python_venv release_dates "$1" +PYTHONPATH=. python_venv release_dates "$@" diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt index 1d841a10db6dc..218ffecf8af5a 100644 --- a/tools/dependency/requirements.txt +++ b/tools/dependency/requirements.txt @@ -2,14 +2,12 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --generate-hashes tools/dependency/requirements.txt +# pip-compile --allow-unsafe --generate-hashes requirements.in # certifi==2021.5.30 \ --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 - # via - # -r tools/dependency/requirements.txt - # requests + # via requests cffi==1.14.5 \ --hash=sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813 \ --hash=sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06 \ @@ -48,53 +46,39 @@ cffi==1.14.5 \ --hash=sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406 \ --hash=sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d \ --hash=sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c - # via - # -r tools/dependency/requirements.txt - # pynacl -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via -r tools/dependency/requirements.txt -charset-normalizer==2.0.4 \ - --hash=sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b \ - --hash=sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3 + # via pynacl +charset-normalizer==2.0.6 \ + --hash=sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6 \ + --hash=sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f # via requests colorama==0.4.4 \ 
--hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r tools/dependency/requirements.txt -deprecated==1.2.12 \ - --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \ - --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via -r requirements.in +deprecated==1.2.13 \ + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d + # via pygithub idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 - # via - # -r tools/dependency/requirements.txt - # requests + # via requests packaging==21.0 \ --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 - # via -r tools/dependency/requirements.txt + # via -r requirements.in pycparser==2.20 \ --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 - # via - # -r tools/dependency/requirements.txt - # cffi + # via cffi pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r tools/dependency/requirements.txt + # via -r requirements.in pyjwt==2.1.0 \ --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via pygithub pynacl==1.4.0 \ 
--hash=sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4 \ --hash=sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4 \ @@ -114,35 +98,71 @@ pynacl==1.4.0 \ --hash=sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514 \ --hash=sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff \ --hash=sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via pygithub pyparsing==2.4.7 \ --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b - # via - # -r tools/dependency/requirements.txt - # packaging + # via packaging +pytz==2021.3 \ + --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \ + --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326 + # via -r requirements.in requests==2.26.0 \ --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via pygithub six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # -r tools/dependency/requirements.txt - # pynacl -urllib3==1.26.6 \ - --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ - --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f - # via - # -r tools/dependency/requirements.txt - # requests -wrapt==1.12.1 \ - --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 - # via - # -r tools/dependency/requirements.txt - # deprecated + # via pynacl +urllib3==1.26.7 \ + --hash=sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844 
\ + --hash=sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece + # via requests +wrapt==1.13.1 \ + --hash=sha256:97f016514ceac524832e7d1bd41cf928b992ebe0324d59736f84ad5f4bbe0632 \ + --hash=sha256:0b2cbe418beeff3aadb3afc39a67d3f5f6a3eb020ceb5f2bcf56bef14b33629a \ + --hash=sha256:95c9fcfc326fdd3e2fd264e808f6474ca7ffd253feb3a505ee5ceb4d78216ef7 \ + --hash=sha256:db0daf2afca9f3b3a76e96ecb5f55ba82615ec584471d7aa27c1bdeb9e3888bb \ + --hash=sha256:1b46e4fe0f9efbfaf1ee82fc79f9cb044c69b67b181c58370440d396fe40736e \ + --hash=sha256:b0eed9b295039a619f64667f27cffbffcfc0559073d562700912ca6266bc8b28 \ + --hash=sha256:8a6ba1b00d07f5a90a2d2eb1804a42e2067a6145b7745a8297664a75a8a232ba \ + --hash=sha256:947a8d9d7829364e11eca88af18394713c8f98571cbc672b12545977d837f054 \ + --hash=sha256:6aa687da5565674c9696fafd2b8d44a04fb697ec2431af21c3def9cbedc4082a \ + --hash=sha256:7929ce97be2f7c49f454a6f8e014225e53cc3767fe48cce94b188de2225232ac \ + --hash=sha256:2d18618440df6bc072762625e9c843d32a7328347c321b89f8df3a7c4a72ce6c \ + --hash=sha256:cb0b12b365b054bee2a53078a67df81781be0686cc3f3ab8bbdd16b2e188570a \ + --hash=sha256:3816922f0941f1637869a04e25d1e5261dfa55cc6b39c73872cbf192ea562443 \ + --hash=sha256:b41ce8ee3825634e67883dd4dab336f95d0cc9d223fb7e224dcd36d66af93694 \ + --hash=sha256:d0ae90fd60c7473e437b0dd48ae323c11f631fe47c243056f9e7505d26e8e2f6 \ + --hash=sha256:f4377eda306b488255ea4336662cd9015a902d6dc2ed77a3e4c1e3b42387453a \ + --hash=sha256:bc42803987eb46b5fc67ec9a072df15a72ee9db61e3b7dd955d82581bf141f60 \ + --hash=sha256:04a00cef5d1b9e0e8db997816437b436e859106283c4771a40c4de4759344765 \ + --hash=sha256:836c73f53a0cefc7ba10c6f4a0d78894cb4876f56035fe500b029e0a1ae0ffe9 \ + --hash=sha256:6c241b4ef0744590ae0ee89305743977e478200cff961bdcc6b3d0530aea3377 \ + --hash=sha256:19b2c992668c9ca764899bae52987a04041ebc21859d2646db0b27e089c2fd6b \ + --hash=sha256:9d200716eb4bb1d73f47f3ccc4f98fdf979dcc82d752183828f1be2e332b6874 \ + 
--hash=sha256:77fef0bfdc612f5f30e43392a9f67dddaf4f48f299421bf25f910d0f47173f3d \ + --hash=sha256:b1137e6aef3ac267c2af7d3af0266ef3f8dd1e5cde67b8eac9fa3b94e7fa0ada \ + --hash=sha256:972099fa9cf4e43c255701c78ec5098c2fec4d6ea669a110b3414a158e772b0a \ + --hash=sha256:5dc6c8cfaf4ff2a4632f8f97d29f555d6951eb0f905d3d47b3fd69bddb653214 \ + --hash=sha256:f1e2cea943192e24070b65bda862901c02bdf7c6abcd66ef5381ad6511921067 \ + --hash=sha256:8a184c655bb41295a9b0c28745a1b762c0c86025e43808b7e814f9cedc6c563d \ + --hash=sha256:6b81913fdba96e286f0c6007eb61f0158e64a1941bfc72fee61b34a4f8f9877f \ + --hash=sha256:aa637733f1d599077522f6a1f0c6c40389aa90a44cba37afcefef26f8e53d28f \ + --hash=sha256:ec803c9d6e4ce037201132d903ff8b0dd26c9688be50ce4c77c420c076e78ff7 \ + --hash=sha256:8055f8cc9a80dc1db01f31af6399b83f597ec164f07b7251d2a1bf1c6c025190 \ + --hash=sha256:3658ae9c704906cab5865a00c1aa9e1fd3555074d1a4462fa1742d7fea8260ae \ + --hash=sha256:9f839c47698052ef5c2c094e21f8a06d0828aebe52d20cdb505faa318c62e886 \ + --hash=sha256:fd5320bf61a2e8d3b46d9e183323293c9a695df8f38c98d17c45e1846758f9a9 \ + --hash=sha256:e2eb4f38441b56698b4d40d48fd331e4e8a0477264785d08cbced63813d4bd29 \ + --hash=sha256:2f6fbea8936ba862425664fc689182a8ef50a6d88cd49f3cd073eccd3e78c930 \ + --hash=sha256:4f3f99bb8eed5d394bbb898c5191ed91ebf21187d52b2c45895733ae2798f373 \ + --hash=sha256:21c1710f61aa95b4be83a32b6d6facbb0efdfac22dee65e1caa72a83deed7cda \ + --hash=sha256:40fd2cebad4010787de4221ec27a650635eed3e49e4bbfa8244fc34836cc2457 \ + --hash=sha256:c803526c0d3fa426e06de379b4eb56102234f2dc3c3a24a500d7962a83ca6166 \ + --hash=sha256:e5a0727ea56de6e9a17693589bcf913d6bf1ec49f12d4671993321f3325fda4f \ + --hash=sha256:04312fbf51e9dd15261228e6b4bed0c0ed5723ccf986645d2c7308511dccba35 \ + --hash=sha256:bd705e341baccc3d1ef20e790b1f6383bd4ae92a77ba87a86ece8189fab8793c \ + --hash=sha256:909a80ce028821c7ad01bdcaa588126825931d177cdccd00b3545818d4a195ce + # via deprecated diff --git a/tools/dependency/tests/test_pip_check.py 
b/tools/dependency/tests/test_pip_check.py deleted file mode 100644 index 0c3458626cc89..0000000000000 --- a/tools/dependency/tests/test_pip_check.py +++ /dev/null @@ -1,195 +0,0 @@ -from unittest.mock import MagicMock, patch, PropertyMock - -import pytest - -from tools.dependency import pip_check - - -def test_pip_checker_constructor(): - checker = pip_check.PipChecker("path1", "path2", "path3") - assert checker.checks == ("dependabot",) - assert checker.dependabot_config_path == pip_check.DEPENDABOT_CONFIG == ".github/dependabot.yml" - assert checker.requirements_filename == pip_check.REQUIREMENTS_FILENAME == "requirements.txt" - assert checker.args.paths == ['path1', 'path2', 'path3'] - - -def test_pip_checker_config_requirements(): - checker = pip_check.PipChecker("path1", "path2", "path3") - - config_mock = patch( - "tools.dependency.pip_check.PipChecker.dependabot_config", - new_callable=PropertyMock) - - with config_mock as m_config: - m_config.return_value.__getitem__.return_value = [ - {"package-ecosystem": "pip", "directory": "dir1"}, - {"package-ecosystem": "not-pip", "directory": "dir2"}, - {"package-ecosystem": "pip", "directory": "dir3"}] - assert checker.config_requirements == {'dir1', 'dir3'} - assert ( - list(m_config.return_value.__getitem__.call_args) - == [('updates',), {}]) - - -@pytest.mark.parametrize("isdict", [True, False]) -def test_pip_checker_dependabot_config(patches, isdict): - checker = pip_check.PipChecker("path1", "path2", "path3") - patched = patches( - "utils", - ("PipChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - - with patched as (m_utils, m_path): - if isdict: - m_utils.from_yaml.return_value = {} - - if isdict: - assert checker.dependabot_config == m_utils.from_yaml.return_value - else: - with pytest.raises(pip_check.PipConfigurationError) as e: - checker.dependabot_config - - assert ( - e.value.args[0] - == f'Unable to parse dependabot config: {checker.dependabot_config_path}') - - 
assert ( - list(m_path.return_value.joinpath.call_args) - == [(checker._dependabot_config, ), {}]) - assert ( - list(m_utils.from_yaml.call_args) - == [(m_path.return_value.joinpath.return_value,), {}]) - - -def test_pip_checker_requirements_dirs(patches): - checker = pip_check.PipChecker("path1", "path2", "path3") - dummy_glob = [ - "FILE1", "FILE2", "FILE3", - "REQUIREMENTS_FILE", "FILE4", - "REQUIREMENTS_FILE", "FILE5"] - patched = patches( - ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), - ("PipChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - expected = [] - - with patched as (m_reqs, m_path): - m_reqs.return_value = "REQUIREMENTS_FILE" - _glob = [] - - for fname in dummy_glob: - _mock = MagicMock() - _mock.name = fname - if fname == "REQUIREMENTS_FILE": - expected.append(_mock) - _glob.append(_mock) - - m_path.return_value.glob.return_value = _glob - assert checker.requirements_dirs == {f"/{f.parent.relative_to.return_value}" for f in expected} - - for exp in expected: - assert ( - list(exp.parent.relative_to.call_args) - == [(m_path.return_value,), {}]) - assert "requirements_dirs" in checker.__dict__ - - -TEST_REQS = ( - (set(), set()), - (set(["A", "B"]), set()), - (set(["A", "B"]), set(["B", "C"])), - (set(["A", "B", "C"]), set(["A", "B", "C"])), - (set(), set(["B", "C"]))) - - -@pytest.mark.parametrize("requirements", TEST_REQS) -def test_pip_checker_check_dependabot(patches, requirements): - config, dirs = requirements - checker = pip_check.PipChecker("path1", "path2", "path3") - - patched = patches( - ("PipChecker.config_requirements", dict(new_callable=PropertyMock)), - ("PipChecker.requirements_dirs", dict(new_callable=PropertyMock)), - ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), - "PipChecker.dependabot_success", - "PipChecker.dependabot_errors", - prefix="tools.dependency.pip_check") - - with patched as (m_config, m_dirs, m_fname, m_success, m_errors): - 
m_config.return_value = config - m_dirs.return_value = dirs - assert not checker.check_dependabot() - - if config & dirs: - assert ( - list(m_success.call_args) - == [(config & dirs, ), {}]) - else: - assert not m_success.called - - if config - dirs: - assert ( - [(config - dirs, f"Missing {m_fname.return_value} dir, specified in dependabot config"), {}] - in list(list(c) for c in m_errors.call_args_list)) - - if dirs - config: - assert ( - [(dirs - config, f"Missing dependabot config for {m_fname.return_value} in dir"), {}] - in list(list(c) for c in m_errors.call_args_list)) - - if not config - dirs and not dirs - config: - assert not m_errors.called - - -def test_pip_checker_dependabot_success(patches): - checker = pip_check.PipChecker("path1", "path2", "path3") - succeed_mock = patch - success = set(["C", "D", "B", "A"]) - - patched = patches( - "PipChecker.succeed", - ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - - with patched as (m_succeed, m_fname): - checker.dependabot_success(success) - - assert ( - list(m_succeed.call_args) - == [('dependabot', - [f"{m_fname.return_value}: {x}" for x in sorted(success)]), {}]) - - -def test_pip_checker_dependabot_errors(patches): - checker = pip_check.PipChecker("path1", "path2", "path3") - succeed_mock = patch - errors = set(["C", "D", "B", "A"]) - MSG = "ERROR MESSAGE" - - patched = patches( - "PipChecker.error", - ("PipChecker.name", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - - with patched as (m_error, m_name): - checker.dependabot_errors(errors, MSG) - - assert ( - list(list(c) for c in list(m_error.call_args_list)) - == [[('dependabot', [f'ERROR MESSAGE: {x}']), {}] for x in sorted(errors)]) - - -def test_pip_checker_main(): - class_mock = patch("tools.dependency.pip_check.PipChecker") - - with class_mock as m_class: - assert ( - pip_check.main("arg0", "arg1", "arg2") - == m_class.return_value.run.return_value) - - 
assert ( - list(m_class.call_args) - == [('arg0', 'arg1', 'arg2'), {}]) - assert ( - list(m_class.return_value.run.call_args) - == [(), {}]) diff --git a/tools/dependency/utils.py b/tools/dependency/utils.py index e47107fe14635..1f44241402294 100644 --- a/tools/dependency/utils.py +++ b/tools/dependency/utils.py @@ -39,11 +39,11 @@ def get_github_release_from_urls(urls): if components[5] == 'archive': # Only support .tar.gz, .zip today. Figure out the release tag from this # filename. - if components[6].endswith('.tar.gz'): - github_version = components[6][:-len('.tar.gz')] + if components[-1].endswith('.tar.gz'): + github_version = components[-1][:-len('.tar.gz')] else: - assert (components[6].endswith('.zip')) - github_version = components[6][:-len('.zip')] + assert (components[-1].endswith('.zip')) + github_version = components[-1][:-len('.zip')] else: # Release tag is a path component. assert (components[5] == 'releases') diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index 100c104cb26f3..f35c0b97f0b17 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -39,7 +39,7 @@ def load_module(name, path): IGNORE_DEPS = set([ 'envoy', 'envoy_api', - 'envoy_api_canonical', + 'envoy_api', 'platforms', 'bazel_tools', 'local_config_cc', @@ -76,12 +76,12 @@ class DependencyInfo(object): def deps_by_use_category(self, use_category): """Find the set of external dependencies in a given use_category. - Args: - use_category: string providing use_category. + Args: + use_category: string providing use_category. - Returns: - Set of dependency identifiers that match use_category. - """ + Returns: + Set of dependency identifiers that match use_category. + """ return set( name for name, metadata in REPOSITORY_LOCATIONS_SPEC.items() if use_category in metadata['use_category']) @@ -89,13 +89,13 @@ def deps_by_use_category(self, use_category): def get_metadata(self, dependency): """Obtain repository metadata for a dependency. 
- Args: - dependency: string providing dependency identifier. + Args: + dependency: string providing dependency identifier. - Returns: - A dictionary with the repository metadata as defined in - bazel/repository_locations.bzl. - """ + Returns: + A dictionary with the repository metadata as defined in + bazel/repository_locations.bzl. + """ return REPOSITORY_LOCATIONS_SPEC.get(dependency) @@ -116,12 +116,12 @@ def __init__( def query_external_deps(self, *targets): """Query the build graph for transitive external dependencies. - Args: - targets: Bazel targets. + Args: + targets: Bazel targets. - Returns: - A set of dependency identifiers that are reachable from targets. - """ + Returns: + A set of dependency identifiers that are reachable from targets. + """ deps_query = 'deps(set({}))'.format(' '.join(targets)) try: deps = subprocess.check_output(['bazel', 'query', deps_query], @@ -149,9 +149,9 @@ def query_external_deps(self, *targets): def list_extensions(self): """List all extensions. - Returns: - Dictionary items from source/extensions/extensions_build_config.bzl. - """ + Returns: + Dictionary items from source/extensions/extensions_build_config.bzl. + """ return extensions_build_config.EXTENSIONS.items() @@ -167,9 +167,9 @@ def __init__(self, dep_info, build_graph): def validate_build_graph_structure(self): """Validate basic assumptions about dependency relationship in the build graph. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating build dependency structure...') queried_core_ext_deps = self._build_graph.query_external_deps( '//source/exe:envoy_main_common_with_core_extensions_lib', '//source/extensions/...') @@ -183,9 +183,9 @@ def validate_build_graph_structure(self): def validate_test_only_deps(self): """Validate that test-only dependencies aren't included in //source/... - Raises: - DependencyError: on a dependency validation error. 
- """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating test-only dependencies...') # Validate that //source doesn't depend on test_only queried_source_deps = self._build_graph.query_external_deps('//source/...') @@ -208,12 +208,12 @@ def validate_test_only_deps(self): def validate_data_plane_core_deps(self): """Validate dataplane_core dependencies. - Check that we at least tag as dataplane_core dependencies that match some - well-known targets for the data-plane. + Check that we at least tag as dataplane_core dependencies that match some + well-known targets for the data-plane. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating data-plane dependencies...') # Necessary but not sufficient for dataplane. With some refactoring we could # probably have more precise tagging of dataplane/controlplane/other deps in @@ -238,13 +238,12 @@ def validate_data_plane_core_deps(self): def validate_control_plane_deps(self): """Validate controlplane dependencies. - Check that we at least tag as controlplane dependencies that match some - well-known targets for - the control-plane. + Check that we at least tag as controlplane dependencies that match some + well-known targets for the control-plane. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating control-plane dependencies...') # Necessary but not sufficient for controlplane. With some refactoring we could # probably have more precise tagging of dataplane/controlplane/other deps in @@ -265,13 +264,13 @@ def validate_control_plane_deps(self): def validate_extension_deps(self, name, target): """Validate that extensions are correctly declared for dataplane_ext and observability_ext. - Args: - name: extension name. - target: extension Bazel target. + Args: + name: extension name. 
+ target: extension Bazel target. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print(f'Validating extension {name} dependencies...') queried_deps = self._build_graph.query_external_deps(target) marginal_deps = queried_deps.difference(self._queried_core_deps) @@ -297,9 +296,9 @@ def validate_extension_deps(self, name, target): def validate_all(self): """Collection of all validations. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ self.validate_build_graph_structure() self.validate_test_only_deps() self.validate_data_plane_core_deps() diff --git a/tools/deprecate_features/BUILD b/tools/deprecate_features/BUILD new file mode 100644 index 0000000000000..c3429723995d9 --- /dev/null +++ b/tools/deprecate_features/BUILD @@ -0,0 +1,9 @@ +load("@rules_python//python:defs.bzl", "py_binary") + +licenses(["notice"]) # Apache 2 + +py_binary( + name = "deprecate_features", + srcs = ["deprecate_features.py"], + deps = ["@envoy_repo"], +) diff --git a/tools/deprecate_features/deprecate_features.py b/tools/deprecate_features/deprecate_features.py index c7468e6784585..aa1d3ff0458f2 100644 --- a/tools/deprecate_features/deprecate_features.py +++ b/tools/deprecate_features/deprecate_features.py @@ -4,13 +4,15 @@ import re import subprocess import fileinput -from six.moves import input + +import envoy_repo # Sorts out the list of deprecated proto fields which should be disallowed and returns a tuple of # email and code changes. 
def deprecate_proto(): - grep_output = subprocess.check_output('grep -r "deprecated = true" api/*', shell=True) + grep_output = subprocess.check_output( + 'grep -r "deprecated = true" api/*', shell=True, cwd=envoy_repo.PATH) filenames_and_fields = set() diff --git a/tools/deprecate_features/deprecate_features.sh b/tools/deprecate_features/deprecate_features.sh deleted file mode 100644 index 661b348e0f0d7..0000000000000 --- a/tools/deprecate_features/deprecate_features.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -. tools/shell_utils.sh - -set -e - -python_venv deprecate_features diff --git a/tools/deprecate_features/requirements.txt b/tools/deprecate_features/requirements.txt deleted file mode 100644 index 643fcd2d4a395..0000000000000 --- a/tools/deprecate_features/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 diff --git a/tools/deprecate_version/BUILD b/tools/deprecate_version/BUILD new file mode 100644 index 0000000000000..40bb0e4c09277 --- /dev/null +++ b/tools/deprecate_version/BUILD @@ -0,0 +1,14 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@base_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +py_binary( + name = "deprecate_version", + srcs = ["deprecate_version.py"], + deps = [ + "@envoy_repo", + requirement("gitpython"), + requirement("pygithub"), + ], +) diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index 0e7ad87a82ba3..6d16814462979 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -1,14 +1,6 @@ -# Script for automating cleanup PR creation for deprecated runtime features +# Bazel usage # -# sh tools/deprecate_version/deprecate_version.sh -# -# Direct usage (not recommended): -# -# python 
tools/deprecate_version/deprecate_version.py -# -# e.g -# -# python tools/deprecate_version/deprecate_version.py +# bazel run //tools/deprecate_version:deprecate_version # # A GitHub access token must be set in GITHUB_TOKEN. To create one, go to # Settings -> Developer settings -> Personal access tokens in GitHub and create @@ -31,6 +23,8 @@ import github from git import Repo +import envoy_repo + try: input = raw_input # Python 2 except NameError: @@ -134,7 +128,7 @@ def create_issues(access_token, runtime_and_pr): def get_runtime_and_pr(): """Returns a list of tuples of [runtime features to deprecate, PR, commit the feature was added] """ - repo = Repo(os.getcwd()) + repo = Repo(envoy_repo.PATH) # grep source code looking for reloadable features which are true to find the # PR they were added. diff --git a/tools/deprecate_version/deprecate_version.sh b/tools/deprecate_version/deprecate_version.sh deleted file mode 100755 index 5421f66565b54..0000000000000 --- a/tools/deprecate_version/deprecate_version.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -. 
tools/shell_utils.sh - -set -e - -python_venv deprecate_version diff --git a/tools/deprecate_version/requirements.txt b/tools/deprecate_version/requirements.txt deleted file mode 100644 index e64b21c2feb4c..0000000000000 --- a/tools/deprecate_version/requirements.txt +++ /dev/null @@ -1,138 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/deprecate_version/requirements.txt -# -certifi==2021.5.30 \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ - --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee - # via - # -r tools/deprecate_version/requirements.txt - # requests -cffi==1.14.5 \ - --hash=sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813 \ - --hash=sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06 \ - --hash=sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea \ - --hash=sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee \ - --hash=sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396 \ - --hash=sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73 \ - --hash=sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315 \ - --hash=sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1 \ - --hash=sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49 \ - --hash=sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892 \ - --hash=sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482 \ - --hash=sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058 \ - --hash=sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5 \ - --hash=sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53 \ - --hash=sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045 \ - 
--hash=sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3 \ - --hash=sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5 \ - --hash=sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e \ - --hash=sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c \ - --hash=sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369 \ - --hash=sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827 \ - --hash=sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053 \ - --hash=sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa \ - --hash=sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4 \ - --hash=sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322 \ - --hash=sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132 \ - --hash=sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62 \ - --hash=sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa \ - --hash=sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0 \ - --hash=sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396 \ - --hash=sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e \ - --hash=sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991 \ - --hash=sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6 \ - --hash=sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1 \ - --hash=sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406 \ - --hash=sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d \ - --hash=sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c - # via pynacl -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - 
--hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via - # -r tools/deprecate_version/requirements.txt - # requests -deprecated==1.2.12 \ - --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \ - --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1 - # via - # -r tools/deprecate_version/requirements.txt - # pygithub -gitdb==4.0.7 \ - --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ - --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 - # via - # -r tools/deprecate_version/requirements.txt - # gitpython -gitpython==3.1.18 \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b - # via -r tools/deprecate_version/requirements.txt -idna==2.10 \ - --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ - --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 - # via - # -r tools/deprecate_version/requirements.txt - # requests -pycparser==2.20 \ - --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ - --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 - # via cffi -pygithub==1.55 \ - --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ - --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r tools/deprecate_version/requirements.txt -pyjwt==2.1.0 \ - --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ - --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 - # via pygithub -pynacl==1.4.0 \ - --hash=sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4 \ - --hash=sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4 \ - 
--hash=sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574 \ - --hash=sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d \ - --hash=sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634 \ - --hash=sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25 \ - --hash=sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f \ - --hash=sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505 \ - --hash=sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122 \ - --hash=sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7 \ - --hash=sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420 \ - --hash=sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f \ - --hash=sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96 \ - --hash=sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6 \ - --hash=sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6 \ - --hash=sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514 \ - --hash=sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff \ - --hash=sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80 - # via pygithub -requests==2.25.1 \ - --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ - --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e - # via - # -r tools/deprecate_version/requirements.txt - # pygithub -six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 - # via pynacl -smmap==4.0.0 \ - --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ - --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 - # via - # -r 
tools/deprecate_version/requirements.txt - # gitdb -urllib3==1.26.6 \ - --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ - --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f - # via - # -r tools/deprecate_version/requirements.txt - # requests -wrapt==1.12.1 \ - --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 - # via - # -r tools/deprecate_version/requirements.txt - # deprecated diff --git a/tools/distribution/BUILD b/tools/distribution/BUILD index 6b60dda875708..e00e257bdc4ea 100644 --- a/tools/distribution/BUILD +++ b/tools/distribution/BUILD @@ -1,6 +1,5 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_script") -load("@base_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "entry_point") licenses(["notice"]) # Apache 2 @@ -10,26 +9,26 @@ exports_files([ "distrotest.sh", ]) -envoy_py_script( - name = "tools.distribution.release", - entry_point = "envoy.distribution.release", - deps = [ - requirement("envoy.distribution.release"), - ], +alias( + name = "release", + actual = entry_point( + pkg = "envoy.distribution.release", + script = "envoy.distribution.release", + ), ) -envoy_py_script( - name = "tools.distribution.sign", - entry_point = "envoy.gpg.sign", - deps = [ - requirement("envoy.gpg.sign"), - ], +alias( + name = "sign", + actual = entry_point( + pkg = "envoy.gpg.sign", + script = "envoy.gpg.sign", + ), ) -envoy_py_script( - name = "tools.distribution.verify", - entry_point = "envoy.distribution.verify", - deps = [ - requirement("envoy.distribution.verify"), - ], +alias( + name = "verify", + actual = entry_point( + pkg = "envoy.distribution.verify", + script = "envoy.distribution.verify", + ), ) diff --git a/tools/docs/BUILD b/tools/docs/BUILD index 4f82feb9de76e..5a53db4b7e654 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -1,6 +1,6 @@ 
load("@rules_python//python:defs.bzl", "py_binary") +load("@base_pip3//:requirements.bzl", "entry_point", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("@base_pip3//:requirements.bzl", "requirement") load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -11,7 +11,7 @@ py_binary( name = "generate_extensions_security_rst", srcs = ["generate_extensions_security_rst.py"], deps = [ - "//tools/base:utils", + requirement("envoy.base.utils"), ], ) @@ -33,24 +33,24 @@ py_binary( ], ) -envoy_py_binary( - name = "tools.docs.sphinx_runner", - deps = [ - "//tools/base:runner", - "//tools/base:utils", - requirement("colorama"), - requirement("Sphinx"), - requirement("sphinx-copybutton"), - requirement("sphinx-rtd-theme"), - requirement("sphinx-tabs"), - requirement("sphinxcontrib-httpdomain"), - requirement("sphinxcontrib-serializinghtml"), - requirement("sphinxext-rediraffe"), - ], +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.docs.sphinx_runner +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling + +alias( + name = "sphinx_runner", + actual = entry_point( + pkg = "envoy.docs.sphinx_runner", + script = "envoy.docs.sphinx_runner", + ), ) envoy_py_binary( name = "tools.docs.rst_check", data = ["//docs:root/version_history/current.rst"], - deps = ["//tools/base:checker"], + deps = [requirement("envoy.base.checker")], ) diff --git a/tools/docs/generate_api_rst.py b/tools/docs/generate_api_rst.py index e5539332de1fb..9ee68cd6efba5 100644 --- a/tools/docs/generate_api_rst.py +++ b/tools/docs/generate_api_rst.py @@ -31,11 +31,11 @@ def main(): # the contents of `proto_srcs` are the result of a bazel genquery, # containing bazel target rules, eg: # - # @envoy_api_canonical//envoy/watchdog/v3alpha:abort_action.proto + # @envoy_api//envoy/watchdog/v3:abort_action.proto # # this transforms them to a list with a 
"canonical" form of: # - # envoy/watchdog/v3alpha/abort_action.proto.rst + # envoy/watchdog/v3/abort_action.proto.rst # envoy_api_protos = [ f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src diff --git a/tools/docs/generate_extensions_security_rst.py b/tools/docs/generate_extensions_security_rst.py index 180e2eb247f69..a1f6b7a49bfd1 100644 --- a/tools/docs/generate_extensions_security_rst.py +++ b/tools/docs/generate_extensions_security_rst.py @@ -8,7 +8,7 @@ import sys import tarfile -from tools.base import utils +from envoy.base import utils def format_item(extension, metadata): diff --git a/tools/docs/rst_check.py b/tools/docs/rst_check.py index 8a5b692d036d3..b064393806a20 100644 --- a/tools/docs/rst_check.py +++ b/tools/docs/rst_check.py @@ -4,7 +4,7 @@ from functools import cached_property from typing import Iterator, List, Pattern -from tools.base import checker +from envoy.base import checker INVALID_REFLINK = r".* ref:.*" REF_WITH_PUNCTUATION_REGEX = r".*\. 
<[^<]*>`\s*" @@ -118,8 +118,9 @@ def check_reflink(self, line: str) -> List[str]: if self.invalid_reflink_re.match(line) else []) def check_ticks(self, line: str) -> List[str]: - return ([f"Backticks should come in pairs (except for links and refs): {line}"] if - (self.backticks_re.match(line)) else []) + return ([ + f"Backticks should come in pairs (``foo``) except for links (`title `_) or refs (ref:`text `): {line}" + ] if (self.backticks_re.match(line)) else []) def run_checks(self) -> Iterator[str]: self.set_tokens() diff --git a/tools/docs/sphinx_runner.py b/tools/docs/sphinx_runner.py deleted file mode 100644 index 53b330c16d2b9..0000000000000 --- a/tools/docs/sphinx_runner.py +++ /dev/null @@ -1,222 +0,0 @@ -import argparse -import os -import pathlib -import platform -import re -import sys -import tarfile -from functools import cached_property -from typing import Tuple - -from colorama import Fore, Style # type:ignore - -from sphinx.cmd.build import main as sphinx_build # type:ignore - -from tools.base import runner, utils - - -class SphinxBuildError(Exception): - pass - - -class SphinxEnvError(Exception): - pass - - -class SphinxRunner(runner.Runner): - _build_dir = "." 
- _build_sha = "UNKNOWN" - - @property - def blob_sha(self) -> str: - """Returns either the version tag or the current build sha""" - return self.docs_tag or self.build_sha - - @property - def build_dir(self) -> pathlib.Path: - """Returns current build_dir - most likely a temp directory""" - return pathlib.Path(self.tempdir.name) - - @property - def build_sha(self) -> str: - """Returns either a provided build_sha or a default""" - return self.args.build_sha or self._build_sha - - @cached_property - def colors(self) -> dict: - """Color scheme for build summary""" - return dict(chrome=Fore.LIGHTYELLOW_EX, key=Fore.LIGHTCYAN_EX, value=Fore.LIGHTMAGENTA_EX) - - @cached_property - def config_file(self) -> pathlib.Path: - """Populates a config file with self.configs and returns the file path""" - return utils.to_yaml(self.configs, self.config_file_path) - - @property - def config_file_path(self) -> pathlib.Path: - """Path to a (temporary) build config""" - return self.build_dir.joinpath("build.yaml") - - @cached_property - def configs(self) -> dict: - """Build configs derived from provided args""" - _configs = dict( - version_string=self.version_string, - release_level=self.release_level, - blob_sha=self.blob_sha, - version_number=self.version_number, - docker_image_tag_name=self.docker_image_tag_name) - if self.validator_path: - _configs["validator_path"] = str(self.validator_path) - if self.descriptor_path: - _configs["descriptor_path"] = str(self.descriptor_path) - return _configs - - @property - def descriptor_path(self) -> pathlib.Path: - """Path to a descriptor file for config validation""" - return pathlib.Path(self.args.descriptor_path) - - @property - def docker_image_tag_name(self) -> str: - """Tag name of current docker image""" - return re.sub(r"([0-9]+\.[0-9]+)\.[0-9]+.*", r"v\1-latest", self.version_number) - - @property - def docs_tag(self) -> str: - """Tag name - ie named version for this docs build""" - return self.args.docs_tag - - @cached_property - 
def html_dir(self) -> pathlib.Path: - """Path to (temporary) directory for outputting html""" - return self.build_dir.joinpath("generated", "html") - - @property - def output_filename(self) -> pathlib.Path: - """Path to tar file for saving generated html docs""" - return pathlib.Path(self.args.output_filename) - - @property - def py_compatible(self) -> bool: - """Current python version is compatible""" - return bool(sys.version_info.major == 3 and sys.version_info.minor >= 8) - - @property - def release_level(self) -> str: - """Current python version is compatible""" - return "tagged" if self.docs_tag else "pre-release" - - @cached_property - def rst_dir(self) -> pathlib.Path: - """Populates an rst directory with contents of given rst tar, - and returns the path to the directory - """ - rst_dir = self.build_dir.joinpath("generated", "rst") - if self.rst_tar: - utils.extract(rst_dir, self.rst_tar) - return rst_dir - - @property - def rst_tar(self) -> pathlib.Path: - """Path to the rst tarball""" - return pathlib.Path(self.args.rst_tar) - - @property - def sphinx_args(self) -> Tuple[str, ...]: - """Command args for sphinx""" - return ( - "-W", "--keep-going", "--color", "-b", "html", str(self.rst_dir), str(self.html_dir)) - - @property - def validator_path(self) -> pathlib.Path: - """Path to validator utility for validating snippets""" - return pathlib.Path(self.args.validator_path) - - @property - def version_file(self) -> pathlib.Path: - """Path to version files for deriving docs version""" - return pathlib.Path(self.args.version_file) - - @cached_property - def version_number(self) -> str: - """Semantic version""" - return self.version_file.read_text().strip() - - @property - def version_string(self) -> str: - """Version string derived from either docs_tag or build_sha""" - return ( - f"tag-{self.docs_tag}" - if self.docs_tag else f"{self.version_number}-{self.build_sha[:6]}") - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - 
super().add_arguments(parser) - parser.add_argument("--build_sha") - parser.add_argument("--docs_tag") - parser.add_argument("--version_file") - parser.add_argument("--validator_path") - parser.add_argument("--descriptor_path") - parser.add_argument("rst_tar") - parser.add_argument("output_filename") - - def build_html(self) -> None: - if sphinx_build(self.sphinx_args): - raise SphinxBuildError("BUILD FAILED") - - def build_summary(self) -> None: - print() - print(self._color("#### Sphinx build configs #####################")) - print(self._color("###")) - for k, v in self.configs.items(): - print(f"{self._color('###')} {self._color(k, 'key')}: {self._color(v, 'value')}") - print(self._color("###")) - print(self._color("###############################################")) - print() - - def check_env(self) -> None: - if not self.py_compatible: - raise SphinxEnvError( - f"ERROR: python version must be >= 3.8, you have {platform.python_version()}") - if not self.configs["release_level"] == "tagged": - return - if f"v{self.version_number}" != self.docs_tag: - raise SphinxEnvError( - "Given git tag does not match the VERSION file content:" - f"{self.docs_tag} vs v{self.version_number}") - # this should probs only check the first line - version_current = self.rst_dir.joinpath("version_history", "current.rst").read_text() - if not self.version_number in version_current: - raise SphinxEnvError( - f"Git tag ({self.version_number}) not found in version_history/current.rst") - - def create_tarball(self) -> None: - with tarfile.open(self.output_filename, "w") as tar: - tar.add(self.html_dir, arcname=".") - - @runner.cleansup - def run(self): - os.environ["ENVOY_DOCS_BUILD_CONFIG"] = str(self.config_file) - try: - self.check_env() - except SphinxEnvError as e: - print(e) - return 1 - self.build_summary() - try: - self.build_html() - except SphinxBuildError as e: - print(e) - return 1 - self.create_tarball() - - def _color(self, msg, name=None): - return f"{self.colors[name or 
'chrome']}{msg}{Style.RESET_ALL}" - - -def main(*args) -> int: - return SphinxRunner(*args).run() - - -if __name__ == "__main__": - sys.exit(main(*sys.argv[1:])) diff --git a/tools/docs/tests/test_rst_check.py b/tools/docs/tests/test_rst_check.py index a2bef68a71f0b..91e91ced246ce 100644 --- a/tools/docs/tests/test_rst_check.py +++ b/tools/docs/tests/test_rst_check.py @@ -249,7 +249,7 @@ def test_rst_check_current_version_check_ticks(patches, matches): m_re.return_value.match.return_value = matches assert ( version_file.check_ticks("LINE") - == (["Backticks should come in pairs (except for links and refs): LINE"] + == (["Backticks should come in pairs (``foo``) except for links (`title `_) or refs (ref:`text `): LINE"] if matches else [])) assert ( list(m_re.return_value.match.call_args) diff --git a/tools/docs/tests/test_sphinx_runner.py b/tools/docs/tests/test_sphinx_runner.py deleted file mode 100644 index ad6a3fe1f06c8..0000000000000 --- a/tools/docs/tests/test_sphinx_runner.py +++ /dev/null @@ -1,672 +0,0 @@ -from unittest.mock import MagicMock, PropertyMock - -import pytest - -from tools.docs import sphinx_runner - - -def test_sphinx_runner_constructor(): - runner = sphinx_runner.SphinxRunner() - assert runner._build_sha == "UNKNOWN" - assert "blob_dir" not in runner.__dict__ - - -@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_blob_sha(patches, docs_tag): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.build_sha", dict(new_callable=PropertyMock)), - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_sha, m_tag): - m_tag.return_value = docs_tag - if docs_tag: - assert runner.blob_sha == docs_tag - else: - assert runner.blob_sha == m_sha.return_value - assert "blob_sha" not in runner.__dict__ - - -def test_sphinx_runner_build_dir(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - 
("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_temp): - assert runner.build_dir == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_temp.return_value.name, ), {}]) - assert "build_dir" not in runner.__dict__ - - -@pytest.mark.parametrize("build_sha", [None, "", "SOME_BUILD_SHA"]) -def test_sphinx_runner_build_sha(patches, build_sha): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_args, ): - m_args.return_value.build_sha = build_sha - if build_sha: - assert runner.build_sha == build_sha - else: - assert runner.build_sha == "UNKNOWN" - - assert "build_sha" not in runner.__dict__ - - -def test_sphinx_runner_colors(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "Fore", - prefix="tools.docs.sphinx_runner") - - with patched as (m_colors, ): - assert ( - runner.colors - == dict( - chrome=m_colors.LIGHTYELLOW_EX, - key=m_colors.LIGHTCYAN_EX, - value=m_colors.LIGHTMAGENTA_EX)) - - assert "colors" in runner.__dict__ - - -def test_sphinx_runner_config_file(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "utils", - ("SphinxRunner.config_file_path", dict(new_callable=PropertyMock)), - ("SphinxRunner.configs", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_utils, m_fpath, m_configs): - assert ( - runner.config_file - == m_utils.to_yaml.return_value) - - assert ( - list(m_utils.to_yaml.call_args) - == [(m_configs.return_value, m_fpath.return_value), {}]) - assert "config_file" in runner.__dict__ - - -def test_sphinx_runner_config_file_path(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_build, ): - assert 
runner.config_file_path == m_build.return_value.joinpath.return_value - - assert ( - list(m_build.return_value.joinpath.call_args) - == [('build.yaml',), {}]) - assert "config_file_path" not in runner.__dict__ - - -def test_sphinx_runner_configs(patches): - runner = sphinx_runner.SphinxRunner() - mapping = dict( - version_string="version_string", - release_level="release_level", - blob_sha="blob_sha", - version_number="version_number", - docker_image_tag_name="docker_image_tag_name", - validator_path="validator_path", - descriptor_path="descriptor_path") - - patched = patches( - *[f"SphinxRunner.{v}" for v in mapping.values()], - prefix="tools.docs.sphinx_runner") - - with patched as _mocks: - result = runner.configs - - _configs = {} - for k, v in mapping.items(): - _v = _mocks[list(mapping.values()).index(v)] - if k in ["validator_path", "descriptor_path"]: - _v = str(_v) - _configs[k] = _v - assert result == _configs - assert "configs" in runner.__dict__ - - -def test_sphinx_runner_descriptor_path(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert ( - runner.descriptor_path - == m_plib.Path.return_value) - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.descriptor_path,), {}]) - assert "descriptor_path" not in runner.__dict__ - - -def test_sphinx_runner_docker_image_tag_name(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "re", - ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_re, m_version): - assert ( - runner.docker_image_tag_name - == m_re.sub.return_value) - - assert ( - list(m_re.sub.call_args) - == [('([0-9]+\\.[0-9]+)\\.[0-9]+.*', 'v\\1-latest', - m_version.return_value), {}]) - assert "docker_image_tag_name" not in runner.__dict__ - - -def 
test_sphinx_runner_docs_tag(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_args, ): - assert runner.docs_tag == m_args.return_value.docs_tag - - assert "docs_tag" not in runner.__dict__ - - -def test_sphinx_runner_html_dir(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_build, ): - assert runner.html_dir == m_build.return_value.joinpath.return_value - - assert ( - list(m_build.return_value.joinpath.call_args) - == [('generated', 'html'), {}]) - assert "html_dir" in runner.__dict__ - - -def test_sphinx_runner_output_filename(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert runner.output_filename == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.output_filename, ), {}]) - assert "output_filename" not in runner.__dict__ - - -@pytest.mark.parametrize("major", [2, 3, 4]) -@pytest.mark.parametrize("minor", [5, 6, 7, 8, 9]) -def test_sphinx_runner_py_compatible(patches, major, minor): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "bool", - "sys", - prefix="tools.docs.sphinx_runner") - - with patched as (m_bool, m_sys): - m_sys.version_info.major = major - m_sys.version_info.minor = minor - assert runner.py_compatible == m_bool.return_value - expected = ( - True - if major == 3 and minor >= 8 - else False) - assert ( - list(m_bool.call_args) - == [(expected,), {}]) - assert "py_compatible" not in runner.__dict__ - - -@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_release_level(patches, docs_tag): - runner = 
sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_tag, ): - m_tag.return_value = docs_tag - if docs_tag: - assert runner.release_level == "tagged" - else: - assert runner.release_level == "pre-release" - assert "release_level" not in runner.__dict__ - - -@pytest.mark.parametrize("rst_tar", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_rst_dir(patches, rst_tar): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - "utils", - ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - ("SphinxRunner.rst_tar", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_utils, m_dir, m_rst): - m_rst.return_value = rst_tar - assert runner.rst_dir == m_dir.return_value.joinpath.return_value - - assert ( - list(m_dir.return_value.joinpath.call_args) - == [('generated', 'rst'), {}]) - - if rst_tar: - assert ( - list(m_utils.extract.call_args) - == [(m_dir.return_value.joinpath.return_value, rst_tar), {}]) - else: - assert not m_utils.extract.called - assert "rst_dir" in runner.__dict__ - - -def test_sphinx_runner_rst_tar(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert runner.rst_tar == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.rst_tar, ), {}]) - assert "rst_tar" not in runner.__dict__ - - -def test_sphinx_runner_sphinx_args(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.html_dir", dict(new_callable=PropertyMock)), - ("SphinxRunner.rst_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_html, m_rst): - assert ( - runner.sphinx_args - == ('-W', '--keep-going', 
'--color', '-b', 'html', - str(m_rst.return_value), - str(m_html.return_value))) - - assert "sphinx_args" not in runner.__dict__ - - -def test_sphinx_runner_validator_path(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert ( - runner.validator_path - == m_plib.Path.return_value) - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.validator_path,), {}]) - assert "validator_path" not in runner.__dict__ - - -def test_sphinx_runner_version_file(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert runner.version_file == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.version_file, ), {}]) - assert "version_file" not in runner.__dict__ - - -def test_sphinx_runner_version_number(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.version_file", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_file, ): - assert ( - runner.version_number - == m_file.return_value.read_text.return_value.strip.return_value) - - assert ( - list(m_file.return_value.read_text.call_args) - == [(), {}]) - assert ( - list(m_file.return_value.read_text.return_value.strip.call_args) - == [(), {}]) - - assert "version_number" in runner.__dict__ - - -@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_version_string(patches, docs_tag): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - ("SphinxRunner.build_sha", dict(new_callable=PropertyMock)), - ("SphinxRunner.version_number", 
dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_tag, m_sha, m_version): - m_tag.return_value = docs_tag - if docs_tag: - assert runner.version_string == f"tag-{docs_tag}" - else: - assert runner.version_string == f"{m_version.return_value}-{m_sha.return_value.__getitem__.return_value}" - assert ( - list(m_sha.return_value.__getitem__.call_args) - == [(slice(None, 6, None),), {}]) - - assert "version_string" not in runner.__dict__ - - -def test_sphinx_runner_add_arguments(patches): - runner = sphinx_runner.SphinxRunner() - parser = MagicMock() - patched = patches( - "runner.Runner.add_arguments", - prefix="tools.docs.sphinx_runner") - - with patched as (m_super, ): - runner.add_arguments(parser) - - assert ( - list(m_super.call_args) - == [(parser, ), {}]) - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == [[('--build_sha',), {}], - [('--docs_tag',), {}], - [('--version_file',), {}], - [('--validator_path',), {}], - [('--descriptor_path',), {}], - [('rst_tar',), {}], - [('output_filename',), {}]]) - - -@pytest.mark.parametrize("fails", [True, False]) -def test_sphinx_runner_build_html(patches, fails): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "sphinx_build", - ("SphinxRunner.sphinx_args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_sphinx, m_args): - m_sphinx.side_effect = lambda s: fails - e = None - if fails: - with pytest.raises(sphinx_runner.SphinxBuildError) as e: - runner.build_html() - else: - runner.build_html() - - assert ( - list(m_sphinx.call_args) - == [(m_args.return_value,), {}]) - - if fails: - assert e.value.args == ('BUILD FAILED',) - else: - assert not e - - -def test_sphinx_runner_build_summary(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "print", - "SphinxRunner._color", - ("SphinxRunner.configs", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with 
patched as (m_print, m_color, m_configs): - m_configs.return_value.items.return_value = (("a", "A"), ("b", "B")) - runner.build_summary() - - assert ( - list(list(c) for c in m_print.call_args_list) - == [[(), {}], - [(m_color.return_value,), {}], - [(m_color.return_value,), {}], - [(f"{m_color.return_value} {m_color.return_value}: {m_color.return_value}",), {}], - [(f"{m_color.return_value} {m_color.return_value}: {m_color.return_value}",), {}], - [(m_color.return_value,), {}], - [(m_color.return_value,), {}], - [(), {}]]) - assert ( - list(list(c) for c in m_color.call_args_list) - == [[('#### Sphinx build configs #####################',), {}], - [('###',), {}], - [('###',), {}], - [('a', 'key'), {}], - [('A', 'value'), {}], - [('###',), {}], - [('b', 'key'), {}], - [('B', 'value'), {}], - [('###',), {}], - [('###############################################',), {}]]) - - -@pytest.mark.parametrize("py_compat", [True, False]) -@pytest.mark.parametrize("release_level", ["pre-release", "tagged"]) -@pytest.mark.parametrize("version_number", ["1.17", "1.23", "1.43"]) -@pytest.mark.parametrize("docs_tag", ["v1.17", "v1.23", "v1.73"]) -@pytest.mark.parametrize("current", ["XXX v1.17 ZZZ", "AAA v1.23 VVV", "BBB v1.73 EEE"]) -def test_sphinx_runner_check_env(patches, py_compat, release_level, version_number, docs_tag, current): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "platform", - ("SphinxRunner.configs", dict(new_callable=PropertyMock)), - ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - ("SphinxRunner.py_compatible", dict(new_callable=PropertyMock)), - ("SphinxRunner.rst_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - fails = ( - not py_compat - or (release_level == "tagged" - and (f"v{version_number}" != docs_tag - or version_number not in current))) - - with patched as (m_platform, m_configs, m_version, m_tag, m_py, m_rst): - 
m_py.return_value = py_compat - m_configs.return_value.__getitem__.return_value = release_level - m_version.return_value = version_number - m_tag.return_value = docs_tag - m_rst.return_value.joinpath.return_value.read_text.return_value = current - - if fails: - with pytest.raises(sphinx_runner.SphinxEnvError) as e: - runner.check_env() - else: - runner.check_env() - - if not py_compat: - assert ( - e.value.args - == ("ERROR: python version must be >= 3.8, " - f"you have {m_platform.python_version.return_value}", )) - return - - if release_level != "tagged": - return - - if f"v{version_number}" != docs_tag: - assert ( - e.value.args - == ("Given git tag does not match the VERSION file content:" - f"{docs_tag} vs v{version_number}", )) - return - - assert ( - list(m_rst.return_value.joinpath.call_args) - == [("version_history", "current.rst"), {}]) - - if version_number not in current: - assert ( - e.value.args - == (f"Git tag ({version_number}) not found in version_history/current.rst", )) - - -@pytest.mark.parametrize("exists", [True, False]) -def test_sphinx_runner_cleanup(patches, exists): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_temp, ): - if exists: - runner.__dict__["tempdir"] = m_temp.return_value - assert not runner.cleanup() - - assert not "tempdir" in runner.__dict__ - if exists: - assert ( - list(m_temp.return_value.cleanup.call_args) - == [(), {}]) - else: - assert not m_temp.called - - -def test_sphinx_runner_create_tarball(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "tarfile", - ("SphinxRunner.output_filename", dict(new_callable=PropertyMock)), - ("SphinxRunner.html_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_tar, m_out, m_html): - runner.create_tarball() - - assert ( - list(m_tar.open.call_args) - == [(m_out.return_value, 'w'), 
{}]) - assert ( - list(m_tar.open.return_value.__enter__.return_value.add.call_args) - == [(m_html.return_value,), {'arcname': '.'}]) - - -@pytest.mark.parametrize("check_fails", [True, False]) -@pytest.mark.parametrize("build_fails", [True, False]) -def test_sphinx_runner_run(patches, check_fails, build_fails): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "print", - "os", - "SphinxRunner.build_summary", - "SphinxRunner.check_env", - "SphinxRunner.build_html", - "SphinxRunner.create_tarball", - ("SphinxRunner.config_file", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - def _raise(error): - raise error - - assert runner.run.__wrapped__.__cleansup__ - - with patched as (m_print, m_os, m_summary, m_check, m_build, m_create, m_config): - if check_fails: - _check_error = sphinx_runner.SphinxEnvError("CHECK FAILED") - m_check.side_effect = lambda: _raise(_check_error) - if build_fails: - _build_error = sphinx_runner.SphinxBuildError("BUILD FAILED") - m_build.side_effect = lambda: _raise(_build_error) - assert runner.run() == (1 if (check_fails or build_fails) else None) - - assert ( - list(m_check.call_args) - == [(), {}]) - assert ( - list(m_os.environ.__setitem__.call_args) - == [('ENVOY_DOCS_BUILD_CONFIG', str(m_config.return_value)), {}]) - - if check_fails: - assert ( - list(m_print.call_args) - == [(_check_error,), {}]) - assert not m_summary.called - assert not m_build.called - assert not m_create.called - return - - assert ( - list(m_summary.call_args) - == [(), {}]) - assert ( - list(m_build.call_args) - == [(), {}]) - - if build_fails: - assert ( - list(m_print.call_args) - == [(_build_error,), {}]) - assert not m_create.called - return - - assert not m_print.called - assert ( - list(m_create.call_args) - == [(), {}]) - - -@pytest.mark.parametrize("color", [None, "COLOR"]) -def test_sphinx_runner__color(patches, color): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "Style", - ("SphinxRunner.colors", 
dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_style, m_colors): - assert ( - runner._color("MSG", color) - == f"{m_colors.return_value.__getitem__.return_value}MSG{m_style.RESET_ALL}") - assert ( - list(m_colors.return_value.__getitem__.call_args) - == [(color or "chrome",), {}]) - - -def test_sphinx_runner_main(command_main): - command_main( - sphinx_runner.main, - "tools.docs.sphinx_runner.SphinxRunner") diff --git a/tools/extensions/BUILD b/tools/extensions/BUILD index 52147a0e6b446..96d5c41d46a2c 100644 --- a/tools/extensions/BUILD +++ b/tools/extensions/BUILD @@ -1,6 +1,7 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//tools/base:envoy_python.bzl", "envoy_py_binary") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -16,7 +17,7 @@ envoy_py_binary( "//test/extensions/filters/network/common/fuzz:uber_per_readfilter.cc", ] + envoy_all_extensions(), deps = [ - "//tools/base:checker", - "//tools/base:utils", + requirement("envoy.base.checker"), + requirement("envoy.base.utils"), ], ) diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index 981b05b514280..80c348ae63a9e 100644 --- a/tools/extensions/extensions_check.py +++ b/tools/extensions/extensions_check.py @@ -11,7 +11,7 @@ from importlib.machinery import ModuleSpec, SourceFileLoader from typing import Iterator -from tools.base import checker, utils +from envoy.base import checker, utils BUILD_CONFIG_PATH = "source/extensions/extensions_build_config.bzl" CONTRIB_BUILD_CONFIG_PATH = "contrib/contrib_build_config.bzl" @@ -49,12 +49,13 @@ "envoy.formatter", "envoy.grpc_credentials", "envoy.guarddog_actions", "envoy.health_checkers", "envoy.http.stateful_header_formatters", "envoy.internal_redirect_predicates", "envoy.io_socket", "envoy.http.original_ip_detection", "envoy.matching.common_inputs", - 
"envoy.matching.input_matchers", "envoy.quic.proof_source", "envoy.quic.server.crypto_stream", - "envoy.rate_limit_descriptors", "envoy.request_id", "envoy.resource_monitors", - "envoy.retry_host_predicates", "envoy.retry_priorities", "envoy.stats_sinks", - "envoy.thrift_proxy.filters", "envoy.tracers", "envoy.transport_sockets.downstream", - "envoy.transport_sockets.upstream", "envoy.tls.cert_validator", "envoy.upstreams", - "envoy.wasm.runtime", "envoy.common.key_value") + "envoy.matching.input_matchers", "envoy.tls.key_providers", "envoy.quic.proof_source", + "envoy.quic.server.crypto_stream", "envoy.rate_limit_descriptors", "envoy.request_id", + "envoy.resource_monitors", "envoy.retry_host_predicates", "envoy.retry_priorities", + "envoy.stats_sinks", "envoy.thrift_proxy.filters", "envoy.tracers", "envoy.sip_proxy.filters", + "envoy.transport_sockets.downstream", "envoy.transport_sockets.upstream", + "envoy.tls.cert_validator", "envoy.upstreams", "envoy.wasm.runtime", "envoy.common.key_value", + "envoy.rbac.matchers") EXTENSION_STATUS_VALUES = ( # This extension is stable and is expected to be production usable. diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index 8c13694b15d2d..46d23fdef933c 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -71,7 +71,10 @@ def modify_compile_command(target, args): if is_header(target["file"]): options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable" options += " -Wno-unused-function" - if not target["file"].startswith("external/"): + # By treating external/envoy* as C++ files we are able to use this script from subrepos that + # depend on Envoy targets. + if not target["file"].startswith("external/") or target["file"].startswith( + "external/envoy"): # *.h file is treated as C header by default while our headers files are all C++17. 
options = "-x c++ -std=c++17 -fexceptions " + options diff --git a/tools/print_dependencies.py b/tools/print_dependencies.py index c8c4eec0f284c..966ee0a15cc54 100755 --- a/tools/print_dependencies.py +++ b/tools/print_dependencies.py @@ -2,14 +2,15 @@ # Quick-and-dirty python to fetch dependency information -import imp +import importlib import json import re import subprocess import sys -API_DEPS = imp.load_source('api', 'api/bazel/repository_locations.bzl') -DEPS = imp.load_source('deps', 'bazel/repository_locations.bzl') +API_DEPS = importlib.machinery.SourceFileLoader('api', + 'api/bazel/repository_locations.bzl').load_module() +DEPS = importlib.machinery.SourceFileLoader('deps', 'bazel/repository_locations.bzl').load_module() def print_deps(deps): @@ -19,14 +20,14 @@ def print_deps(deps): if __name__ == '__main__': deps = [] - DEPS.REPOSITORY_LOCATIONS.update(API_DEPS.REPOSITORY_LOCATIONS) + DEPS.REPOSITORY_LOCATIONS_SPEC.update(API_DEPS.REPOSITORY_LOCATIONS_SPEC) - for key, loc in DEPS.REPOSITORY_LOCATIONS.items(): + for key, loc in DEPS.REPOSITORY_LOCATIONS_SPEC.items(): deps.append({ 'identifier': key, - 'file-sha256': loc.get('sha256'), - 'file-url': loc.get('urls')[0], - 'file-prefix': loc.get('strip_prefix', ''), + 'description': loc.get('project_desc'), + 'project': loc.get('project_url'), + 'version': loc.get("version"), }) deps = sorted(deps, key=lambda k: k['identifier']) diff --git a/tools/proto_format/proto_format.sh b/tools/proto_format/proto_format.sh index e80dab257f55b..d6ae826618efa 100755 --- a/tools/proto_format/proto_format.sh +++ b/tools/proto_format/proto_format.sh @@ -35,13 +35,13 @@ if [[ "$1" == "freeze" ]]; then fi # Invoke protoxform aspect. 
-bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//versioning:active_protos ${FREEZE_ARG} \ - @envoy_api_canonical//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto +bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=@envoy_api//versioning:active_protos ${FREEZE_ARG} \ + @envoy_api//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto # Find all source protos. PROTO_TARGETS=() for proto_type in active frozen; do - protos=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:${proto_type}_protos))") + protos=$(bazel query "labels(srcs, labels(deps, @envoy_api//versioning:${proto_type}_protos))") while read -r line; do PROTO_TARGETS+=("$line"); done \ <<< "$protos" done @@ -50,12 +50,11 @@ done TOOLS="$(dirname "$(dirname "$(realpath "$0")")")" # To satisfy dependency on api_proto_plugin. export PYTHONPATH="$TOOLS" -# Build protoprint and merge_active_shadow_tools for use in proto_sync.py. -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint //tools/protoxform:merge_active_shadow +# Build protoprint for use in proto_sync.py. +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint # Copy back the FileDescriptorProtos that protoxform emitted to the source tree. This involves -# pretty-printing to format with protoprint and potentially merging active/shadow versions of protos -# with merge_active_shadow. +# pretty-printing to format with protoprint. ./tools/proto_format/proto_sync.py "--mode=${PROTO_SYNC_CMD}" "${PROTO_TARGETS[@]}" --ci # Need to regenerate //versioning:active_protos before building type DB below if freezing. @@ -66,7 +65,3 @@ fi # Generate api/BUILD file based on updated type database. 
bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/type_whisperer:api_build_file cp -f bazel-bin/tools/type_whisperer/BUILD.api_build_file api/BUILD - -# Misc. manual copies to keep generated_api_shadow/ in sync with api/. -cp -f ./api/bazel/*.bzl ./api/bazel/BUILD ./generated_api_shadow/bazel -cp -f ./api/BUILD ./generated_api_shadow/ diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index b3ea6686b6be9..8d878309c6982 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -1,10 +1,6 @@ #!/usr/bin/env python3 # 1. Take protoxform artifacts from Bazel cache and pretty-print with protoprint.py. -# 2. In the case where we are generating an Envoy internal shadow, it may be -# necessary to combine the current active proto, subject to hand editing, with -# shadow artifacts from the previous version; this is done via -# merge_active_shadow.py. # 3. Diff or copy resulting artifacts to the source tree. import argparse @@ -184,6 +180,13 @@ def get_destination_path(src): package)) dst_path = pathlib.Path('contrib').joinpath(dst_path) + # Non-contrib can not use alpha. + if not 'contrib' in src: + if not 'v2alpha' in package and 'alpha' in package: + raise ProtoSyncError( + "package '{}' uses an alpha namespace. This is not allowed. Instead mark with " + "(xds.annotations.v3.file_status).work_in_progress or related annotation.".format( + package)) return dst_path @@ -218,31 +221,9 @@ def proto_print(src, dst): ]) -def merge_active_shadow(active_src, shadow_src, dst): - """Merge active/shadow FileDescriptorProto to a destination file. - - Args: - active_src: source path for active FileDescriptorProto. - shadow_src: source path for active FileDescriptorProto. - dst: destination path for FileDescriptorProto. 
- """ - print('merge_active_shadow %s' % dst) - subprocess.check_output([ - 'bazel-bin/tools/protoxform/merge_active_shadow', - active_src, - shadow_src, - dst, - ]) - - def sync_proto_file(dst_srcs): """Pretty-print a proto descriptor from protoxform.py Bazel cache artifacts." - In the case where we are generating an Envoy internal shadow, it may be - necessary to combine the current active proto, subject to hand editing, with - shadow artifacts from the previous verion; this is done via - merge_active_shadow(). - Args: dst_srcs: destination/sources path tuple. """ @@ -256,19 +237,8 @@ def sync_proto_file(dst_srcs): # We should only see an active and next major version candidate from # previous version today. assert (len(srcs) == 2) - shadow_srcs = [ - s for s in srcs if s.endswith('.next_major_version_candidate.envoy_internal.proto') - ] active_src = [s for s in srcs if s.endswith('active_or_frozen.proto')][0] - # If we're building the shadow, we need to combine the next major version - # candidate shadow with the potentially hand edited active version. - if len(shadow_srcs) > 0: - assert (len(shadow_srcs) == 1) - with tempfile.NamedTemporaryFile() as f: - merge_active_shadow(active_src, shadow_srcs[0], f.name) - proto_print(f.name, dst) - else: - proto_print(active_src, dst) + proto_print(active_src, dst) src = active_src rel_dst_path = get_destination_path(src) return ['//%s:pkg' % str(rel_dst_path.parent)] @@ -299,7 +269,11 @@ def get_import_deps(proto_path): if import_path.startswith('xds/type/matcher/v3/'): imports.append('@com_github_cncf_udpa//xds/type/matcher/v3:pkg') continue - # Special case handling for UDPA core. + # Special case for handling XDS annotations. + if import_path.startswith('xds/annotations/v3/'): + imports.append('@com_github_cncf_udpa//xds/annotations/v3:pkg') + continue + # Special case handling for XDS core. 
if import_path.startswith('xds/core/v3/'): imports.append('@com_github_cncf_udpa//xds/core/v3:pkg') continue @@ -477,7 +451,7 @@ def should_sync(path, api_proto_modified_files, py_tools_modified_files): return False -def sync(api_root, mode, is_ci, labels, shadow): +def sync(api_root, mode, is_ci, labels): api_proto_modified_files = git_modified_files('api', 'proto') py_tools_modified_files = git_modified_files('tools', 'py') with tempfile.TemporaryDirectory() as tmp: @@ -487,8 +461,7 @@ def sync(api_root, mode, is_ci, labels, shadow): paths.append(utils.bazel_bin_path_for_output_artifact(label, '.active_or_frozen.proto')) paths.append( utils.bazel_bin_path_for_output_artifact( - label, '.next_major_version_candidate.envoy_internal.proto' - if shadow else '.next_major_version_candidate.proto')) + label, '.next_major_version_candidate.proto')) dst_src_paths = defaultdict(list) for path in paths: if os.path.exists(path) and os.stat(path).st_size > 0: @@ -556,10 +529,8 @@ def sync(api_root, mode, is_ci, labels, shadow): parser = argparse.ArgumentParser() parser.add_argument('--mode', choices=['check', 'fix']) parser.add_argument('--api_root', default='./api') - parser.add_argument('--api_shadow_root', default='./generated_api_shadow') parser.add_argument('--ci', action="store_true", default=False) parser.add_argument('labels', nargs='*') args = parser.parse_args() - sync(args.api_root, args.mode, args.ci, args.labels, False) - sync(args.api_shadow_root, args.mode, args.ci, args.labels, True) + sync(args.api_root, args.mode, args.ci, args.labels) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 4eb0e6f5cf9ec..c880490a1a268 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -32,16 +32,17 @@ py_binary( deps = [ ":manifest_proto_py_proto", "//tools/api_proto_plugin", - "//tools/base:utils", "//tools/config_validation:validate_fragment", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", 
"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", + "@com_github_cncf_udpa//xds/annotations/v3:pkg_py_proto", "@com_google_protobuf//:protobuf_python", + requirement("envoy.base.utils"), requirement("Jinja2"), ], ) protodoc_rule( name = "api_v3_protodoc", - deps = ["@envoy_api_canonical//:v3_protos"], + deps = ["@envoy_api//:v3_protos"], ) diff --git a/tools/protodoc/protodoc.bzl b/tools/protodoc/protodoc.bzl index 751a24bc44846..9858d190efe28 100644 --- a/tools/protodoc/protodoc.bzl +++ b/tools/protodoc/protodoc.bzl @@ -17,10 +17,15 @@ protodoc_aspect = api_proto_plugin_aspect("//tools/protodoc", _protodoc_impl) def _protodoc_rule_impl(ctx): return [ DefaultInfo( - files = depset(transitive = [ - d[OutputGroupInfo].rst - for d in ctx.attr.deps - ]), + files = depset( + transitive = [ + depset([ + x + for x in ctx.attr.deps[0][OutputGroupInfo].rst.to_list() + if x.short_path.startswith("../envoy_api") + ]), + ], + ), ), ] diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index a8d45c7ccd1e5..1057becbd1007 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -21,22 +21,24 @@ # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] +from envoy.base import utils + from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor -from tools.base import utils from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 -from udpa.annotations import status_pb2 +from udpa.annotations import status_pb2 as udpa_status_pb2 from validate import validate_pb2 +from xds.annotations.v3 import status_pb2 as xds_status_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Last documented v2 api version -ENVOY_LAST_V2_VERSION = "1.17.2" +ENVOY_LAST_V2_VERSION = "1.17" # Namespace prefix for Envoy top-level APIs. 
ENVOY_PREFIX = '.envoy.' @@ -124,6 +126,13 @@ 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } +WIP_WARNING = ( + '.. warning::\n This API feature is currently work-in-progress. API features marked as ' + 'work-in-progress are not considered stable, are not covered by the :ref:`threat model ' + '`, are not supported by the security team, and are subject to ' + 'breaking changes. Do not use this feature without understanding each of the previous ' + 'points.\n\n') + r = runfiles.Create() EXTENSION_DB = utils.from_yaml(r.Rlocation("envoy/source/extensions/extensions_metadata.yaml")) @@ -176,22 +185,19 @@ def github_url(text, type_context): return f":repo:`{text} `" -def format_comment_with_annotations(comment, type_name=''): +def format_comment_with_annotations(comment, show_wip_warning=False): """Format a comment string with additional RST for annotations. Args: comment: comment string. - type_name: optional, 'message' or 'enum' may be specified for additional - message/enum specific annotations. + show_wip_warning: whether to show the work in progress warning. Returns: A string with additional RST from annotations. """ - alpha_warning = '' - if annotations.ALPHA_ANNOTATION in comment.annotations: - experimental_warning = ( - '.. 
warning::\n This API is alpha and is not covered by the :ref:`threat model `.\n\n' - ) + wip_warning = '' + if show_wip_warning: + wip_warning = WIP_WARNING formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: @@ -202,7 +208,7 @@ def format_comment_with_annotations(comment, type_name=''): for category in comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION].split(","): formatted_extension_category += format_extension_category(category) comment = annotations.without_annotations(strip_leading_space(comment.raw) + '\n') - return alpha_warning + comment + formatted_extension + formatted_extension_category + return comment + wip_warning + formatted_extension + formatted_extension_category def map_lines(f, s): @@ -558,7 +564,10 @@ def format_field_as_definition_list_item( or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment - formatted_leading_comment = format_comment_with_annotations(leading_comment) + formatted_leading_comment = format_comment_with_annotations( + leading_comment, + field.options.HasExtension(xds_status_pb2.field_status) + and field.options.Extensions[xds_status_pb2.field_status].work_in_progress) if hide_not_implemented(leading_comment): return '' @@ -710,7 +719,7 @@ def visit_enum(self, enum_proto, type_context): header = format_header('-', 'Enum %s' % normal_enum_type) proto_link = github_url(f"[{normal_enum_type} proto]", type_context) + '\n\n' leading_comment = type_context.leading_comment - formatted_leading_comment = format_comment_with_annotations(leading_comment, 'enum') + formatted_leading_comment = format_comment_with_annotations(leading_comment) if hide_not_implemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + format_enum_as_definition_list( @@ -725,7 +734,10 @@ def visit_message(self, msg_proto, type_context, nested_msgs, nested_enums): header = 
format_header('-', normal_msg_type) proto_link = github_url(f"[{normal_msg_type} proto]", type_context) + '\n\n' leading_comment = type_context.leading_comment - formatted_leading_comment = format_comment_with_annotations(leading_comment, 'message') + formatted_leading_comment = format_comment_with_annotations( + leading_comment, + msg_proto.options.HasExtension(xds_status_pb2.message_status) + and msg_proto.options.Extensions[xds_status_pb2.message_status].work_in_progress) if hide_not_implemented(leading_comment): return '' @@ -766,11 +778,14 @@ def visit_file(self, file_proto, type_context, services, msgs, enums): if not has_messages: header = ':orphan:\n\n' + header warnings = '' - if file_proto.options.HasExtension(status_pb2.file_status): - if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: - warnings += ( - '.. warning::\n This API is work-in-progress and is ' - 'subject to breaking changes.\n\n') + added_wip_warning = False + if file_proto.options.HasExtension(udpa_status_pb2.file_status): + if file_proto.options.Extensions[udpa_status_pb2.file_status].work_in_progress: + added_wip_warning = True + warnings += WIP_WARNING + if not added_wip_warning and file_proto.options.HasExtension(xds_status_pb2.file_status): + if file_proto.options.Extensions[xds_status_pb2.file_status].work_in_progress: + warnings += WIP_WARNING # debug_proto = format_proto_as_block_comment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD index 5473b9d31d24c..d76f73cb4787c 100644 --- a/tools/protoxform/BUILD +++ b/tools/protoxform/BUILD @@ -1,38 +1,10 @@ -load("@rules_python//python:defs.bzl", "py_binary", "py_test") +load("@rules_python//python:defs.bzl", "py_binary") licenses(["notice"]) # Apache 2 -py_binary( - name = "merge_active_shadow", - srcs = [ - "merge_active_shadow.py", - "utils.py", - ], - deps = [ - "//tools/api_proto_plugin", - 
"//tools/type_whisperer:api_type_db_proto_py_proto", - "@com_envoyproxy_protoc_gen_validate//validate:validate_py", - "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", - "@com_google_googleapis//google/api:annotations_py_proto", - "@com_google_protobuf//:protobuf_python", - "@envoy_api_canonical//envoy/annotations:pkg_py_proto", - ], -) - -py_test( - name = "merge_active_shadow_test", - srcs = ["merge_active_shadow_test.py"], - deps = [ - ":merge_active_shadow", - "//tools/api_proto_plugin", - "@com_google_protobuf//:protobuf_python", - ], -) - py_binary( name = "protoxform", srcs = [ - "migrate.py", "options.py", "protoxform.py", "utils.py", @@ -43,8 +15,9 @@ py_binary( "//tools/type_whisperer:api_type_db_proto_py_proto", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", + "@com_github_cncf_udpa//xds/annotations/v3:pkg_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", - "@envoy_api_canonical//envoy/annotations:pkg_py_proto", + "@envoy_api//envoy/annotations:pkg_py_proto", ], ) @@ -67,8 +40,9 @@ py_binary( "//tools/type_whisperer:api_type_db_proto_py_proto", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", + "@com_github_cncf_udpa//xds/annotations/v3:pkg_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", "@com_google_protobuf//:protobuf_python", - "@envoy_api_canonical//envoy/annotations:pkg_py_proto", + "@envoy_api//envoy/annotations:pkg_py_proto", ], ) diff --git a/tools/protoxform/merge_active_shadow.py b/tools/protoxform/merge_active_shadow.py deleted file mode 100644 index fec1be4922647..0000000000000 --- a/tools/protoxform/merge_active_shadow.py +++ /dev/null @@ -1,239 +0,0 @@ -# Merge active and previous version's generated next major version candidate -# shadow. This involve simultaneously traversing both FileDescriptorProtos and: -# 1. 
Recovering hidden_envoy_deprecated_* fields and enum values in active proto. -# 2. Recovering deprecated (sub)message types. -# 3. Misc. fixups for oneof metadata and reserved ranges/names. - -from collections import defaultdict -import copy -import pathlib -import sys - -from tools.api_proto_plugin import type_context as api_type_context -from tools.protoxform import utils - -from google.protobuf import descriptor_pb2, text_format -from envoy.annotations import deprecation_pb2 - -PROTO_PACKAGES = ( - "google.api.annotations", "validate.validate", "envoy.annotations.deprecation", - "envoy.annotations.resource", "udpa.annotations.migrate", "udpa.annotations.security", - "udpa.annotations.status", "udpa.annotations.sensitive", "udpa.annotations.versioning") - - -# Set reserved_range in target_proto to reflect previous_reserved_range skipping -# skip_reserved_numbers. -def adjust_reserved_range(target_proto, previous_reserved_range, skip_reserved_numbers): - del target_proto.reserved_range[:] - for rr in previous_reserved_range: - # We can only handle singleton ranges today. 
- assert ((rr.start == rr.end) or (rr.end == rr.start + 1)) - if rr.start not in skip_reserved_numbers: - target_proto.reserved_range.add().MergeFrom(rr) - - -# Add dependencies for envoy.annotations.disallowed_by_default -def add_deprecation_dependencies(target_proto_dependencies, proto_field, is_enum): - if is_enum: - if proto_field.options.HasExtension(deprecation_pb2.disallowed_by_default_enum) and \ - "envoy/annotations/deprecation.proto" not in target_proto_dependencies: - target_proto_dependencies.append("envoy/annotations/deprecation.proto") - else: - if proto_field.options.HasExtension(deprecation_pb2.disallowed_by_default) and \ - "envoy/annotations/deprecation.proto" not in target_proto_dependencies: - target_proto_dependencies.append("envoy/annotations/deprecation.proto") - if proto_field.type_name == ".google.protobuf.Struct" and \ - "google/protobuf/struct.proto" not in target_proto_dependencies: - target_proto_dependencies.append("google/protobuf/struct.proto") - - -# Merge active/shadow EnumDescriptorProtos to a fresh target EnumDescriptorProto. -def merge_active_shadow_enum(active_proto, shadow_proto, target_proto, target_proto_dependencies): - target_proto.MergeFrom(active_proto) - if not shadow_proto: - return - shadow_values = {v.name: v for v in shadow_proto.value} - skip_reserved_numbers = [] - # For every reserved name, check to see if it's in the shadow, and if so, - # reintroduce in target_proto. - del target_proto.reserved_name[:] - for n in active_proto.reserved_name: - hidden_n = 'hidden_envoy_deprecated_' + n - if hidden_n in shadow_values: - v = shadow_values[hidden_n] - add_deprecation_dependencies(target_proto_dependencies, v, True) - skip_reserved_numbers.append(v.number) - target_proto.value.add().MergeFrom(v) - else: - target_proto.reserved_name.append(n) - adjust_reserved_range(target_proto, active_proto.reserved_range, skip_reserved_numbers) - # Special fixup for deprecation of default enum values. 
- for tv in target_proto.value: - if tv.name == 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE': - for sv in shadow_proto.value: - if sv.number == tv.number: - assert (sv.number == 0) - tv.CopyFrom(sv) - - -# Adjust source code info comments path to reflect insertions of oneof fields -# inside the middle of an existing collection of fields. -def adjust_source_code_info(type_context, field_index, field_adjustment): - - def has_path_prefix(s, t): - return len(s) <= len(t) and all(p[0] == p[1] for p in zip(s, t)) - - for loc in type_context.source_code_info.proto.location: - if has_path_prefix(type_context.path + [2], loc.path): - path_field_index = len(type_context.path) + 1 - if path_field_index < len(loc.path) and loc.path[path_field_index] >= field_index: - loc.path[path_field_index] += field_adjustment - - -# Merge active/shadow DescriptorProtos to a fresh target DescriptorProto. -def merge_active_shadow_message( - type_context, active_proto, shadow_proto, target_proto, target_proto_dependencies): - target_proto.MergeFrom(active_proto) - if not shadow_proto: - return - shadow_fields = {f.name: f for f in shadow_proto.field} - skip_reserved_numbers = [] - # For every reserved name, check to see if it's in the shadow, and if so, - # reintroduce in target_proto. We track both the normal fields we need to add - # back in (extra_simple_fields) and those that belong to oneofs - # (extra_oneof_fields). The latter require special treatment, as we can't just - # append them to the end of the message, they need to be reordered. 
- extra_simple_fields = [] - extra_oneof_fields = defaultdict(list) # oneof index -> list of fields - del target_proto.reserved_name[:] - for n in active_proto.reserved_name: - hidden_n = 'hidden_envoy_deprecated_' + n - if hidden_n in shadow_fields: - f = shadow_fields[hidden_n] - add_deprecation_dependencies(target_proto_dependencies, f, False) - skip_reserved_numbers.append(f.number) - missing_field = copy.deepcopy(f) - # oneof fields from the shadow need to have their index set to the - # corresponding index in active/target_proto. - if missing_field.HasField('oneof_index'): - oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name - missing_oneof_index = None - for oneof_index, oneof_decl in enumerate(target_proto.oneof_decl): - if oneof_decl.name == oneof_name: - missing_oneof_index = oneof_index - if missing_oneof_index is None: - missing_oneof_index = len(target_proto.oneof_decl) - target_proto.oneof_decl.add().MergeFrom( - shadow_proto.oneof_decl[missing_field.oneof_index]) - missing_field.oneof_index = missing_oneof_index - extra_oneof_fields[missing_oneof_index].append(missing_field) - else: - extra_simple_fields.append(missing_field) - else: - target_proto.reserved_name.append(n) - # Copy existing fields, as we need to nuke them. - existing_fields = copy.deepcopy(target_proto.field) - del target_proto.field[:] - # Rebuild fields, taking into account extra_oneof_fields. protoprint.py - # expects that oneof fields are consecutive, so need to sort for this. - current_oneof_index = None - - def append_extra_oneof_fields(current_oneof_index, last_oneof_field_index): - # Add fields from extra_oneof_fields for current_oneof_index. - for oneof_f in extra_oneof_fields[current_oneof_index]: - target_proto.field.add().MergeFrom(oneof_f) - field_adjustment = len(extra_oneof_fields[current_oneof_index]) - # Fixup the comments in source code info. 
Note that this is really - # inefficient, O(N^2) in the worst case, but since we have relatively few - # deprecated fields, is the easiest to implement method. - if last_oneof_field_index is not None: - adjust_source_code_info(type_context, last_oneof_field_index, field_adjustment) - del extra_oneof_fields[current_oneof_index] - return field_adjustment - - field_index = 0 - for f in existing_fields: - if current_oneof_index is not None: - field_oneof_index = f.oneof_index if f.HasField('oneof_index') else None - # Are we exiting the oneof? If so, add the respective extra_one_fields. - if field_oneof_index != current_oneof_index: - field_index += append_extra_oneof_fields(current_oneof_index, field_index) - current_oneof_index = field_oneof_index - elif f.HasField('oneof_index'): - current_oneof_index = f.oneof_index - target_proto.field.add().MergeFrom(f) - field_index += 1 - if current_oneof_index is not None: - # No need to adjust source code info here, since there are no comments for - # trailing deprecated fields, so just set field index to None. - append_extra_oneof_fields(current_oneof_index, None) - # Non-oneof fields are easy to treat, we just append them to the existing - # fields. They don't get any comments, but that's fine in the generated - # shadows. - for f in extra_simple_fields: - target_proto.field.add().MergeFrom(f) - for oneof_index in sorted(extra_oneof_fields.keys()): - for f in extra_oneof_fields[oneof_index]: - target_proto.field.add().MergeFrom(f) - # Same is true for oneofs that are exclusively from the shadow. 
- adjust_reserved_range(target_proto, active_proto.reserved_range, skip_reserved_numbers) - # Visit nested message types - del target_proto.nested_type[:] - shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type} - for index, msg in enumerate(active_proto.nested_type): - merge_active_shadow_message( - type_context.extend_nested_message(index, msg.name, msg.options.deprecated), msg, - shadow_msgs.get(msg.name), target_proto.nested_type.add(), target_proto_dependencies) - # Visit nested enum types - del target_proto.enum_type[:] - shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type} - for enum in active_proto.enum_type: - merge_active_shadow_enum( - enum, shadow_enums.get(enum.name), target_proto.enum_type.add(), - target_proto_dependencies) - # Ensure target has any deprecated sub-message types in case they are needed. - active_msg_names = set([msg.name for msg in active_proto.nested_type]) - for msg in shadow_proto.nested_type: - if msg.name not in active_msg_names: - target_proto.nested_type.add().MergeFrom(msg) - - -# Merge active/shadow FileDescriptorProtos, returning the resulting FileDescriptorProto. 
-def merge_active_shadow_file(active_file_proto, shadow_file_proto): - target_file_proto = copy.deepcopy(active_file_proto) - source_code_info = api_type_context.SourceCodeInfo( - target_file_proto.name, target_file_proto.source_code_info) - package_type_context = api_type_context.TypeContext(source_code_info, target_file_proto.package) - # Visit message types - del target_file_proto.message_type[:] - shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type} - for index, msg in enumerate(active_file_proto.message_type): - merge_active_shadow_message( - package_type_context.extend_message(index, msg.name, msg.options.deprecated), msg, - shadow_msgs.get(msg.name), target_file_proto.message_type.add(), - target_file_proto.dependency) - # Visit enum types - del target_file_proto.enum_type[:] - shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type} - for enum in active_file_proto.enum_type: - merge_active_shadow_enum( - enum, shadow_enums.get(enum.name), target_file_proto.enum_type.add(), - target_file_proto.dependency) - # Ensure target has any deprecated message types in case they are needed. 
- active_msg_names = set([msg.name for msg in active_file_proto.message_type]) - for msg in shadow_file_proto.message_type: - if msg.name not in active_msg_names: - target_file_proto.message_type.add().MergeFrom(msg) - return target_file_proto - - -if __name__ == '__main__': - active_src, shadow_src, dst = sys.argv[1:] - - utils.load_protos(PROTO_PACKAGES) - - active_proto = descriptor_pb2.FileDescriptorProto() - text_format.Merge(pathlib.Path(active_src).read_text(), active_proto) - shadow_proto = descriptor_pb2.FileDescriptorProto() - text_format.Merge(pathlib.Path(shadow_src).read_text(), shadow_proto) - pathlib.Path(dst).write_text(str(merge_active_shadow_file(active_proto, shadow_proto))) diff --git a/tools/protoxform/merge_active_shadow_test.py b/tools/protoxform/merge_active_shadow_test.py deleted file mode 100644 index c15af85918828..0000000000000 --- a/tools/protoxform/merge_active_shadow_test.py +++ /dev/null @@ -1,590 +0,0 @@ -import unittest - -import merge_active_shadow - -from tools.api_proto_plugin import type_context as api_type_context -from tools.protoxform import utils - -from google.protobuf import descriptor_pb2 -from google.protobuf import text_format - - -class MergeActiveShadowTest(unittest.TestCase): - # Dummy type context for tests that don't care about this. - def fake_type_context(self): - fake_source_code_info = descriptor_pb2.SourceCodeInfo() - source_code_info = api_type_context.SourceCodeInfo('fake', fake_source_code_info) - return api_type_context.TypeContext(source_code_info, 'fake_package') - - # Poor man's text proto equivalence. Tensorflow has better tools for this, - # i.e. assertProto2Equal. 
- def assert_text_proto_eq(self, lhs, rhs): - self.assertMultiLineEqual(lhs.strip(), rhs.strip()) - - def testadjust_reserved_range(self): - """adjust_reserved_range removes specified skip_reserved_numbers.""" - desc_pb_text = """ -reserved_range { - start: 41 - end: 41 -} -reserved_range { - start: 42 - end: 42 -} -reserved_range { - start: 43 - end: 44 -} -reserved_range { - start: 50 - end: 51 -} - """ - desc = descriptor_pb2.DescriptorProto() - text_format.Merge(desc_pb_text, desc) - target = descriptor_pb2.DescriptorProto() - merge_active_shadow.adjust_reserved_range(target, desc.reserved_range, [42, 43]) - target_pb_text = """ -reserved_range { - start: 41 - end: 41 -} -reserved_range { - start: 50 - end: 51 -} - """ - self.assert_text_proto_eq(target_pb_text, str(target)) - - def testmerge_active_shadow_enum(self): - """merge_active_shadow_enum recovers shadow values.""" - active_pb_text = """ -value { - number: 1 - name: "foo" -} -value { - number: 0 - name: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE" -} -value { - number: 3 - name: "bar" -} -reserved_name: "baz" -reserved_range { - start: 2 - end: 3 -} - """ - active_proto = descriptor_pb2.EnumDescriptorProto() - text_format.Merge(active_pb_text, active_proto) - shadow_pb_text = """ -value { - number: 1 - name: "foo" -} -value { - number: 0 - name: "wow" -} -value { - number: 3 - name: "bar" -} -value { - number: 2 - name: "hidden_envoy_deprecated_baz" -} -value { - number: 4 - name: "hidden_envoy_deprecated_huh" -} - """ - shadow_proto = descriptor_pb2.EnumDescriptorProto() - text_format.Merge(shadow_pb_text, shadow_proto) - target_proto = descriptor_pb2.EnumDescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_enum( - active_proto, shadow_proto, target_proto, target_proto_dependencies) - target_pb_text = """ -value { - name: "foo" - number: 1 -} -value { - name: "wow" - number: 0 -} -value { - name: "bar" - number: 3 -} -value { - name: "hidden_envoy_deprecated_baz" - 
number: 2 -} - """ - self.assert_text_proto_eq(target_pb_text, str(target_proto)) - - def testmerge_active_shadow_message_comments(self): - """merge_active_shadow_message preserves comment field correspondence.""" - active_pb_text = """ -field { - number: 9 - name: "oneof_1_0" - oneof_index: 0 -} -field { - number: 1 - name: "simple_field_0" -} -field { - number: 0 - name: "oneof_2_0" - oneof_index: 2 -} -field { - number: 8 - name: "oneof_2_1" - oneof_index: 2 -} -field { - number: 3 - name: "oneof_0_0" - oneof_index: 1 -} -field { - number: 4 - name: "newbie" -} -field { - number: 7 - name: "oneof_3_0" - oneof_index: 3 -} -reserved_name: "missing_oneof_field_0" -reserved_name: "missing_oneof_field_1" -reserved_name: "missing_oneof_field_2" -oneof_decl { - name: "oneof_0" -} -oneof_decl { - name: "oneof_1" -} -oneof_decl { - name: "oneof_2" -} -oneof_decl { - name: "oneof_3" -} - """ - active_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(active_pb_text, active_proto) - active_source_code_info_text = """ -location { - path: [4, 1, 2, 4] - leading_comments: "field_4" -} -location { - path: [4, 1, 2, 5] - leading_comments: "field_5" -} -location { - path: [4, 1, 2, 3] - leading_comments: "field_3" -} -location { - path: [4, 1, 2, 0] - leading_comments: "field_0" -} -location { - path: [4, 1, 2, 1] - leading_comments: "field_1" -} -location { - path: [4, 0, 2, 2] - leading_comments: "ignore_0" -} -location { - path: [4, 1, 2, 6] - leading_comments: "field_6" -} -location { - path: [4, 1, 2, 2] - leading_comments: "field_2" -} -location { - path: [3] - leading_comments: "ignore_1" -} -""" - active_source_code_info = descriptor_pb2.SourceCodeInfo() - text_format.Merge(active_source_code_info_text, active_source_code_info) - shadow_pb_text = """ -field { - number: 10 - name: "hidden_envoy_deprecated_missing_oneof_field_0" - oneof_index: 0 -} -field { - number: 11 - name: "hidden_envoy_deprecated_missing_oneof_field_1" - oneof_index: 3 -} -field { - number: 
11 - name: "hidden_envoy_deprecated_missing_oneof_field_2" - oneof_index: 2 -} -oneof_decl { - name: "oneof_0" -} -oneof_decl { - name: "oneof_1" -} -oneof_decl { - name: "oneof_2" -} -oneof_decl { - name: "some_removed_oneof" -} -oneof_decl { - name: "oneof_3" -} -""" - shadow_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(shadow_pb_text, shadow_proto) - target_proto = descriptor_pb2.DescriptorProto() - source_code_info = api_type_context.SourceCodeInfo('fake', active_source_code_info) - fake_type_context = api_type_context.TypeContext(source_code_info, 'fake_package') - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - fake_type_context.extend_message(1, "foo", False), active_proto, shadow_proto, - target_proto, target_proto_dependencies) - target_pb_text = """ -field { - name: "oneof_1_0" - number: 9 - oneof_index: 0 -} -field { - name: "hidden_envoy_deprecated_missing_oneof_field_0" - number: 10 - oneof_index: 0 -} -field { - name: "simple_field_0" - number: 1 -} -field { - name: "oneof_2_0" - number: 0 - oneof_index: 2 -} -field { - name: "oneof_2_1" - number: 8 - oneof_index: 2 -} -field { - name: "hidden_envoy_deprecated_missing_oneof_field_2" - number: 11 - oneof_index: 2 -} -field { - name: "oneof_0_0" - number: 3 - oneof_index: 1 -} -field { - name: "newbie" - number: 4 -} -field { - name: "oneof_3_0" - number: 7 - oneof_index: 3 -} -field { - name: "hidden_envoy_deprecated_missing_oneof_field_1" - number: 11 - oneof_index: 4 -} -oneof_decl { - name: "oneof_0" -} -oneof_decl { - name: "oneof_1" -} -oneof_decl { - name: "oneof_2" -} -oneof_decl { - name: "oneof_3" -} -oneof_decl { - name: "some_removed_oneof" -} - """ - target_source_code_info_text = """ -location { - path: 4 - path: 1 - path: 2 - path: 6 - leading_comments: "field_4" -} -location { - path: 4 - path: 1 - path: 2 - path: 7 - leading_comments: "field_5" -} -location { - path: 4 - path: 1 - path: 2 - path: 4 - leading_comments: "field_3" -} 
-location { - path: 4 - path: 1 - path: 2 - path: 0 - leading_comments: "field_0" -} -location { - path: 4 - path: 1 - path: 2 - path: 2 - leading_comments: "field_1" -} -location { - path: 4 - path: 0 - path: 2 - path: 2 - leading_comments: "ignore_0" -} -location { - path: 4 - path: 1 - path: 2 - path: 8 - leading_comments: "field_6" -} -location { - path: 4 - path: 1 - path: 2 - path: 3 - leading_comments: "field_2" -} -location { - path: 3 - leading_comments: "ignore_1" -} -""" - self.maxDiff = None - self.assert_text_proto_eq(target_pb_text, str(target_proto)) - self.assert_text_proto_eq( - target_source_code_info_text, str(fake_type_context.source_code_info.proto)) - - def testmerge_active_shadow_message(self): - """merge_active_shadow_message recovers shadow fields with oneofs.""" - active_pb_text = """ -field { - number: 1 - name: "foo" -} -field { - number: 0 - name: "bar" - oneof_index: 2 -} -field { - number: 3 - name: "baz" -} -field { - number: 4 - name: "newbie" -} -reserved_name: "wow" -reserved_range { - start: 2 - end: 3 -} -oneof_decl { - name: "ign" -} -oneof_decl { - name: "ign2" -} -oneof_decl { - name: "some_oneof" -} - """ - active_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(active_pb_text, active_proto) - shadow_pb_text = """ -field { - number: 1 - name: "foo" -} -field { - number: 0 - name: "bar" -} -field { - number: 3 - name: "baz" -} -field { - number: 5 - name: "hidden_envoy_deprecated_wow" - options { - deprecated: true - [validate.rules] { - string { - max_bytes: 1024 - } - } - [envoy.annotations.disallowed_by_default]: true - } - oneof_index: 0 -} -oneof_decl { - name: "some_oneof" -} - """ - shadow_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(shadow_pb_text, shadow_proto) - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) 
- target_pb_text = """ -field { - name: "foo" - number: 1 -} -field { - name: "bar" - number: 0 - oneof_index: 2 -} -field { - name: "hidden_envoy_deprecated_wow" - number: 5 - options { - deprecated: true - [validate.rules] { - string { - max_bytes: 1024 - } - } - [envoy.annotations.disallowed_by_default]: true - } - oneof_index: 2 -} -field { - name: "baz" - number: 3 -} -field { - name: "newbie" - number: 4 -} -oneof_decl { - name: "ign" -} -oneof_decl { - name: "ign2" -} -oneof_decl { - name: "some_oneof" -} -reserved_range { - start: 2 - end: 3 -} - """ - self.assert_text_proto_eq(target_pb_text, str(target_proto)) - self.assertEqual(target_proto_dependencies[0], 'envoy/annotations/deprecation.proto') - - def testmerge_active_shadow_message_no_shadow_message(self): - """merge_active_shadow_message doesn't require a shadow message for new nested active messages.""" - active_proto = descriptor_pb2.DescriptorProto() - shadow_proto = descriptor_pb2.DescriptorProto() - active_proto.nested_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) - self.assertEqual(target_proto.nested_type[0].name, 'foo') - - def testmerge_active_shadow_message_no_shadow_enum(self): - """merge_active_shadow_message doesn't require a shadow enum for new nested active enums.""" - active_proto = descriptor_pb2.DescriptorProto() - shadow_proto = descriptor_pb2.DescriptorProto() - active_proto.enum_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) - self.assertEqual(target_proto.enum_type[0].name, 'foo') - - def testmerge_active_shadow_message_missing(self): - 
"""merge_active_shadow_message recovers missing messages from shadow.""" - active_proto = descriptor_pb2.DescriptorProto() - shadow_proto = descriptor_pb2.DescriptorProto() - shadow_proto.nested_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) - self.assertEqual(target_proto.nested_type[0].name, 'foo') - - def testmerge_active_shadow_file_missing(self): - """merge_active_shadow_file recovers missing messages from shadow.""" - active_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto.message_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto = merge_active_shadow.merge_active_shadow_file(active_proto, shadow_proto) - self.assertEqual(target_proto.message_type[0].name, 'foo') - - def testmerge_active_shadow_file_no_shadow_message(self): - """merge_active_shadow_file doesn't require a shadow message for new active messages.""" - active_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto = descriptor_pb2.FileDescriptorProto() - active_proto.message_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto = merge_active_shadow.merge_active_shadow_file(active_proto, shadow_proto) - self.assertEqual(target_proto.message_type[0].name, 'foo') - - def testmerge_active_shadow_file_no_shadow_enum(self): - """merge_active_shadow_file doesn't require a shadow enum for new active enums.""" - active_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto = descriptor_pb2.FileDescriptorProto() - active_proto.enum_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto = merge_active_shadow.merge_active_shadow_file(active_proto, shadow_proto) - self.assertEqual(target_proto.enum_type[0].name, 'foo') - - 
-# TODO(htuch): add some test for recursion. - -if __name__ == '__main__': - utils.load_protos(merge_active_shadow.PROTO_PACKAGES) - unittest.main() diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py deleted file mode 100644 index 0ab0808abc502..0000000000000 --- a/tools/protoxform/migrate.py +++ /dev/null @@ -1,276 +0,0 @@ -# API upgrade business logic. - -import copy -import re - -from tools.api_proto_plugin import traverse -from tools.api_proto_plugin import visitor -from tools.protoxform import options -from tools.protoxform import utils - -from envoy_api_canonical.envoy.annotations import resource_pb2 -from udpa.annotations import migrate_pb2 -from udpa.annotations import status_pb2 -from google.api import annotations_pb2 - -ENVOY_API_TYPE_REGEX_STR = 'envoy_api_(msg|enum_value|field|enum)_([\w\.]+)' -ENVOY_COMMENT_WITH_TYPE_REGEX = re.compile( - '<%s>|:ref:`%s`' % (ENVOY_API_TYPE_REGEX_STR, ENVOY_API_TYPE_REGEX_STR)) - - -class UpgradeVisitor(visitor.Visitor): - """Visitor to generate an upgraded proto from a FileDescriptor proto. - - See visitor.Visitor for visitor method docs comments. - """ - - def __init__(self, n, typedb, envoy_internal_shadow, package_version_status): - self._base_version = n - self._typedb = typedb - self._envoy_internal_shadow = envoy_internal_shadow - self._package_version_status = package_version_status - - def _upgraded_comment(self, c): - - def upgrade_type(match): - # We're upgrading a type within a RST anchor reference here. These are - # stylized and match the output format of tools/protodoc. We need to do - # some special handling of field/enum values, and also the normalization - # that was performed in v2 for envoy.api.v2 types. 
- label_ref_type, label_normalized_type_name, section_ref_type, section_normalized_type_name = match.groups( - ) - if label_ref_type is not None: - ref_type = label_ref_type - normalized_type_name = label_normalized_type_name - else: - ref_type = section_ref_type - normalized_type_name = section_normalized_type_name - if ref_type == 'field' or ref_type == 'enum_value': - normalized_type_name, residual = normalized_type_name.rsplit('.', 1) - else: - residual = '' - type_name = 'envoy.' + normalized_type_name - api_v2_type_name = 'envoy.api.v2.' + normalized_type_name - if type_name in self._typedb.types: - type_desc = self._typedb.types[type_name] - else: - # We need to deal with envoy.api.* normalization in the v2 API. We won't - # need this in v3+, so rather than churn docs, we just have this workaround. - type_desc = self._typedb.types[api_v2_type_name] - repl_type = type_desc.next_version_type_name[ - len('envoy.'):] if type_desc.next_version_type_name else normalized_type_name - # TODO(htuch): this should really either go through the type database or - # via the descriptor pool and annotations, but there are only two of these - # we need for the initial v2 -> v3 docs cut, so hard coding for now. - # Tracked at https://github.com/envoyproxy/envoy/issues/9734. - if repl_type == 'config.route.v3.RouteAction': - if residual == 'host_rewrite': - residual = 'host_rewrite_literal' - elif residual == 'auto_host_rewrite_header': - residual = 'auto_host_rewrite' - new_ref = 'envoy_api_%s_%s%s' % ( - ref_type, repl_type, '.' + residual if residual else '') - if label_ref_type is not None: - return '<%s>' % new_ref - else: - return ':ref:`%s`' % new_ref - - return re.sub(ENVOY_COMMENT_WITH_TYPE_REGEX, upgrade_type, c) - - def _upgraded_post_method(self, m): - return re.sub(r'^/v%d/' % self._base_version, '/v%d/' % (self._base_version + 1), m) - - # Upgraded type using canonical type naming, e.g. foo.bar. 
- def _upgraded_type_canonical(self, t): - if not t.startswith('envoy'): - return t - type_desc = self._typedb.types[t] - if type_desc.next_version_type_name: - return type_desc.next_version_type_name - return t - - # Upgraded type using internal type naming, e.g. .foo.bar. - def _upgraded_type(self, t): - if not t.startswith('.envoy'): - return t - return '.' + self._upgraded_type_canonical(t[1:]) - - def _deprecate(self, proto, field_or_value): - """Deprecate a field or value in a message/enum proto. - - Args: - proto: DescriptorProto or EnumDescriptorProto message. - field_or_value: field or value inside proto. - """ - if self._envoy_internal_shadow: - field_or_value.name = 'hidden_envoy_deprecated_' + field_or_value.name - else: - reserved = proto.reserved_range.add() - reserved.start = field_or_value.number - reserved.end = field_or_value.number + 1 - proto.reserved_name.append(field_or_value.name) - options.add_hide_option(field_or_value.options) - - def _rename(self, proto, migrate_annotation): - """Rename a field/enum/service/message - - Args: - proto: DescriptorProto or corresponding proto message - migrate_annotation: udpa.annotations.MigrateAnnotation message - """ - if migrate_annotation.rename: - proto.name = migrate_annotation.rename - migrate_annotation.rename = "" - - def _oneof_promotion(self, msg_proto, field_proto, migrate_annotation): - """Promote a field to a oneof. - - Args: - msg_proto: DescriptorProto for message containing field. - field_proto: FieldDescriptorProto for field. 
- migrate_annotation: udpa.annotations.FieldMigrateAnnotation message - """ - if migrate_annotation.oneof_promotion: - oneof_index = -1 - for n, oneof_decl in enumerate(msg_proto.oneof_decl): - if oneof_decl.name == migrate_annotation.oneof_promotion: - oneof_index = n - if oneof_index == -1: - oneof_index = len(msg_proto.oneof_decl) - oneof_decl = msg_proto.oneof_decl.add() - oneof_decl.name = migrate_annotation.oneof_promotion - field_proto.oneof_index = oneof_index - migrate_annotation.oneof_promotion = "" - - def visit_service(self, service_proto, type_context): - upgraded_proto = copy.deepcopy(service_proto) - for m in upgraded_proto.method: - if m.options.HasExtension(annotations_pb2.http): - http_options = m.options.Extensions[annotations_pb2.http] - # TODO(htuch): figure out a more systematic approach using the type DB - # to service upgrade. - http_options.post = self._upgraded_post_method(http_options.post) - m.input_type = self._upgraded_type(m.input_type) - m.output_type = self._upgraded_type(m.output_type) - if service_proto.options.HasExtension(resource_pb2.resource): - upgraded_proto.options.Extensions[ - resource_pb2.resource].type = self._upgraded_type_canonical( - service_proto.options.Extensions[resource_pb2.resource].type) - return upgraded_proto - - def visit_message(self, msg_proto, type_context, nested_msgs, nested_enums): - upgraded_proto = copy.deepcopy(msg_proto) - if upgraded_proto.options.deprecated and not self._envoy_internal_shadow: - options.add_hide_option(upgraded_proto.options) - options.set_versioning_annotation(upgraded_proto.options, type_context.name) - # Mark deprecated fields as ready for deletion by protoxform. 
- for f in upgraded_proto.field: - if f.options.deprecated: - self._deprecate(upgraded_proto, f) - if self._envoy_internal_shadow: - # When shadowing, we use the upgraded version of types (which should - # themselves also be shadowed), to allow us to avoid unnecessary - # references to the previous version (and complexities around - # upgrading during API boosting). - f.type_name = self._upgraded_type(f.type_name) - else: - # Make sure the type name is erased so it isn't picked up by protoxform - # when computing deps. - f.type_name = "" - else: - f.type_name = self._upgraded_type(f.type_name) - if f.options.HasExtension(migrate_pb2.field_migrate): - field_migrate = f.options.Extensions[migrate_pb2.field_migrate] - self._rename(f, field_migrate) - self._oneof_promotion(upgraded_proto, f, field_migrate) - # Upgrade nested messages. - del upgraded_proto.nested_type[:] - upgraded_proto.nested_type.extend(nested_msgs) - # Upgrade enums. - del upgraded_proto.enum_type[:] - upgraded_proto.enum_type.extend(nested_enums) - return upgraded_proto - - def visit_enum(self, enum_proto, type_context): - upgraded_proto = copy.deepcopy(enum_proto) - if upgraded_proto.options.deprecated and not self._envoy_internal_shadow: - options.add_hide_option(upgraded_proto.options) - for v in upgraded_proto.value: - if v.options.deprecated: - # We need special handling for the zero field, as proto3 needs some value - # here. - if v.number == 0 and not self._envoy_internal_shadow: - v.name = 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE' - else: - # Mark deprecated enum values as ready for deletion by protoxform. - self._deprecate(upgraded_proto, v) - elif v.options.HasExtension(migrate_pb2.enum_value_migrate): - self._rename(v, v.options.Extensions[migrate_pb2.enum_value_migrate]) - return upgraded_proto - - def visit_file(self, file_proto, type_context, services, msgs, enums): - upgraded_proto = copy.deepcopy(file_proto) - # Upgrade imports. 
- upgraded_proto.dependency[:] = [ - dependency for dependency in upgraded_proto.dependency - if dependency not in ("udpa/annotations/migrate.proto") - ] - # Upgrade package. - upgraded_proto.package = self._typedb.next_version_protos[ - upgraded_proto.name].qualified_package - upgraded_proto.name = self._typedb.next_version_protos[upgraded_proto.name].proto_path - upgraded_proto.options.ClearExtension(migrate_pb2.file_migrate) - upgraded_proto.options.Extensions[ - status_pb2.file_status].package_version_status = self._package_version_status - # Upgrade comments. - for location in upgraded_proto.source_code_info.location: - location.leading_comments = self._upgraded_comment(location.leading_comments) - location.trailing_comments = self._upgraded_comment(location.trailing_comments) - for n, c in enumerate(location.leading_detached_comments): - location.leading_detached_comments[n] = self._upgraded_comment(c) - # Upgrade services. - del upgraded_proto.service[:] - upgraded_proto.service.extend(services) - # Upgrade messages. - del upgraded_proto.message_type[:] - upgraded_proto.message_type.extend(msgs) - # Upgrade enums. - del upgraded_proto.enum_type[:] - upgraded_proto.enum_type.extend(enums) - - return upgraded_proto - - -def version_upgrade_xform(n, envoy_internal_shadow, file_proto, params): - """Transform a FileDescriptorProto from vN[alpha\d] to v(N+1). - - Args: - n: version N to upgrade from. - envoy_internal_shadow: generate a shadow for Envoy internal use containing deprecated fields. - file_proto: vN[alpha\d] FileDescriptorProto message. - params: plugin parameters. - - Returns: - v(N+1) FileDescriptorProto message. - """ - # Load type database. - if params['type_db_path']: - utils.load_type_db(params['type_db_path']) - typedb = utils.get_type_db() - # If this isn't a proto in an upgraded package, return None. 
- if file_proto.name not in typedb.next_version_protos or not typedb.next_version_protos[ - file_proto.name]: - return None - # Otherwise, this .proto needs upgrading, do it. - freeze = 'extra_args' in params and params['extra_args'] == 'freeze' - existing_pkg_version_status = file_proto.options.Extensions[ - status_pb2.file_status].package_version_status - # Normally, we are generating the NEXT_MAJOR_VERSION_CANDIDATE. However, if - # freezing and previously this was the active major version, the migrated - # version is now the ACTIVE version. - if freeze and existing_pkg_version_status == status_pb2.ACTIVE: - package_version_status = status_pb2.ACTIVE - else: - package_version_status = status_pb2.NEXT_MAJOR_VERSION_CANDIDATE - return traverse.traverse_file( - file_proto, UpgradeVisitor(n, typedb, envoy_internal_shadow, package_version_status)) diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index b30058b37a68e..5bcefd650702f 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -28,11 +28,7 @@ from envoy.annotations import deprecation_pb2 from udpa.annotations import migrate_pb2, status_pb2 - -PROTO_PACKAGES = ( - "google.api.annotations", "validate.validate", "envoy.annotations.resource", - "udpa.annotations.migrate", "udpa.annotations.security", "udpa.annotations.status", - "udpa.annotations.versioning", "udpa.annotations.sensitive") +from xds.annotations.v3 import status_pb2 as xds_status_pb2 NEXT_FREE_FIELD_MIN = 5 @@ -82,8 +78,9 @@ def clang_format(contents): Returns: clang-formatted string """ + clang_format_path = os.getenv("CLANG_FORMAT", "clang-format-11") return subprocess.run( - ['clang-format', + [clang_format_path, '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'], input=contents.encode('utf-8'), stdout=subprocess.PIPE).stdout @@ -233,6 +230,10 @@ def camel_case(s): options.Extensions[migrate_pb2.file_migrate].CopyFrom( file_proto.options.Extensions[migrate_pb2.file_migrate]) + 
if file_proto.options.HasExtension(xds_status_pb2.file_status): + options.Extensions[xds_status_pb2.file_status].CopyFrom( + file_proto.options.Extensions[xds_status_pb2.file_status]) + if file_proto.options.HasExtension( status_pb2.file_status) and file_proto.package.endswith('alpha'): options.Extensions[status_pb2.file_status].CopyFrom( @@ -717,7 +718,7 @@ def visit_file(self, file_proto, type_context, services, msgs, enums): if __name__ == '__main__': proto_desc_path = sys.argv[1] - utils.load_protos(PROTO_PACKAGES) + utils.load_protos() file_proto = descriptor_pb2.FileDescriptorProto() input_text = pathlib.Path(proto_desc_path).read_text() diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl index abdbac95b3963..0d3c32b6e9b2e 100644 --- a/tools/protoxform/protoxform.bzl +++ b/tools/protoxform/protoxform.bzl @@ -6,11 +6,7 @@ def _protoxform_impl(target, ctx): ctx, "proto", "protoxform", - [ - ".active_or_frozen.proto", - ".next_major_version_candidate.proto", - ".next_major_version_candidate.envoy_internal.proto", - ], + [".active_or_frozen.proto"], ) # Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index 9e370dcc0c824..4685774b5eedb 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -8,16 +8,10 @@ import functools from tools.api_proto_plugin import plugin, visitor -from tools.protoxform import migrate, utils +from tools.protoxform import utils from udpa.annotations import status_pb2 -PROTO_PACKAGES = ( - "google.api.annotations", "validate.validate", - "envoy_api_canonical.envoy.annotations.deprecation", - "envoy_api_canonical.envoy.annotations.resource", "udpa.annotations.migrate", - "udpa.annotations.security", "udpa.annotations.status", "udpa.annotations.sensitive") - class ProtoXformError(Exception): """Base error class for the protoxform module.""" @@ -56,8 +50,6 @@ def visit_file(self, 
file_proto, type_context, services, msgs, enums): if existing_pkg_version_status == status_pb2.UNKNOWN and not pkg_version_status_exempt: raise ProtoXformError('package_version_status must be set in %s' % file_proto.name) # Only update package_version_status for .active_or_frozen.proto, - # migrate.version_upgrade_xform has taken care of next major version - # candidates. if self._active_or_frozen and not pkg_version_status_exempt: # Freeze if this is an active package with a next major version. Preserve # frozen status otherwise. @@ -74,23 +66,13 @@ def visit_file(self, file_proto, type_context, services, msgs, enums): def main(): - utils.load_protos(PROTO_PACKAGES) + utils.load_protos() plugin.plugin([ plugin.direct_output_descriptor( '.active_or_frozen.proto', functools.partial(ProtoFormatVisitor, True), want_params=True), - plugin.OutputDescriptor( - '.next_major_version_candidate.proto', - functools.partial(ProtoFormatVisitor, False), - functools.partial(migrate.version_upgrade_xform, 2, False), - want_params=True), - plugin.OutputDescriptor( - '.next_major_version_candidate.envoy_internal.proto', - functools.partial(ProtoFormatVisitor, False), - functools.partial(migrate.version_upgrade_xform, 2, True), - want_params=True) ]) diff --git a/tools/protoxform/protoxform_test.sh b/tools/protoxform/protoxform_test.sh index 69cbc859aa520..0e6dad6960718 100755 --- a/tools/protoxform/protoxform_test.sh +++ b/tools/protoxform/protoxform_test.sh @@ -19,14 +19,3 @@ bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_ //tools/testdata/protoxform:fix_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint ./tools/protoxform/protoxform_test_helper.py fix "${PROTO_TARGETS[@]}" - -# protoxform freeze test cases -PROTO_TARGETS=() -protos=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:freeze_protos))") -while read -r line; do 
PROTO_TARGETS+=("$line"); done \ - <<< "$protos" -bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:freeze_protos \ - --//tools/api_proto_plugin:extra_args=freeze \ - //tools/testdata/protoxform:freeze_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint -./tools/protoxform/protoxform_test_helper.py freeze "${PROTO_TARGETS[@]}" diff --git a/tools/protoxform/protoxform_test_helper.py b/tools/protoxform/protoxform_test_helper.py index b36134ce35208..c5356ed24a3f3 100755 --- a/tools/protoxform/protoxform_test_helper.py +++ b/tools/protoxform/protoxform_test_helper.py @@ -134,8 +134,6 @@ def run(cmd, path, filename, version): for target in sys.argv[2:]: path, filename = path_and_filename(target) messages += run(cmd, path, filename, 'active_or_frozen') - messages += run(cmd, path, filename, 'next_major_version_candidate') - messages += run(cmd, path, filename, 'next_major_version_candidate.envoy_internal') if len(messages) == 0: logging.warning("PASS") diff --git a/tools/protoxform/utils.py b/tools/protoxform/utils.py index 8f770eafa1dd7..47930a8e78119 100644 --- a/tools/protoxform/utils.py +++ b/tools/protoxform/utils.py @@ -4,6 +4,12 @@ from google.protobuf import text_format +PROTO_FILES = ( + "google.api.annotations", "validate.validate", "envoy.annotations.deprecation", + "envoy.annotations.resource", "udpa.annotations.migrate", "udpa.annotations.security", + "udpa.annotations.status", "udpa.annotations.sensitive", "udpa.annotations.versioning", + "xds.annotations.v3.status") + _typedb = None @@ -19,6 +25,6 @@ def load_type_db(type_db_path): text_format.Merge(f.read(), _typedb) -def load_protos(packages): - for package in packages: +def load_protos(): + for package in PROTO_FILES: importlib.import_module(f"{package}_pb2") diff --git a/tools/shell_utils.sh b/tools/shell_utils.sh index 
e32c3c95056b8..4a8379bb67f89 100644 --- a/tools/shell_utils.sh +++ b/tools/shell_utils.sh @@ -25,5 +25,5 @@ python_venv() { pip3 install -r "${SCRIPT_DIR}"/requirements.txt shift - python3 "${SCRIPT_DIR}/${PY_NAME}.py" "$*" + python3 "${SCRIPT_DIR}/${PY_NAME}.py" "$@" } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 96dda9ee1dc88..09c571bd35d8f 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -11,6 +11,7 @@ ALS AMZ APC API +ARRAYSIZE ARN ASAN ASCII @@ -22,6 +23,7 @@ AWS BACKTRACE BSON BPF +Repick btree CAS CB @@ -480,6 +482,7 @@ bursty bytecode bytestream bytestring +cacert cacheable cacheability callee @@ -496,6 +499,7 @@ canonicalizer canonicalizing cardinality casted +cfg charset checkin checksum @@ -811,6 +815,7 @@ megamiss mem memcmp memcpy +memset memoize mergeable messagename @@ -851,6 +856,7 @@ namespaced namespaces namespacing nan +nanos natively ndk netblock @@ -1082,11 +1088,13 @@ sendto serializable serializer serv +servercert setenv setsockopt sig sigaction sigactions +sigaltstack siginfo signalstack siloed @@ -1291,3 +1299,11 @@ zlib OBQ SemVer SCM +SCTP +CRLF +clen +crlf +ep +suri +transid +routable diff --git a/tools/testdata/protoxform/BUILD b/tools/testdata/protoxform/BUILD index 6769f453f6ff7..b9e228605acd6 100644 --- a/tools/testdata/protoxform/BUILD +++ b/tools/testdata/protoxform/BUILD @@ -9,14 +9,3 @@ proto_library( "//tools/testdata/protoxform/envoy/v2:fix_protos", ], ) - -proto_library( - name = "freeze_protos", - visibility = ["//visibility:public"], - deps = [ - "//tools/testdata/protoxform/envoy/active_non_terminal/v2:freeze_protos", - "//tools/testdata/protoxform/envoy/active_terminal/v2:freeze_protos", - "//tools/testdata/protoxform/envoy/frozen/v2:freeze_protos", - "//tools/testdata/protoxform/envoy/frozen/v3:freeze_protos", - ], -) diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD 
b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD deleted file mode 100644 index 3031a25f6bb62..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = ["active_non_terminal.proto"], - visibility = ["//visibility:public"], - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@envoy_api//envoy/annotations:pkg", - ], -) diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto deleted file mode 100644 index 241084d62a6ed..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveNonTerminal { - int32 foo = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold deleted file mode 100644 index 50b6993f398fa..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.active_non_terminal.v2"; -option java_outer_classname = "ActiveNonTerminalProto"; -option java_multiple_files = 
true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message ActiveNonTerminal { - int32 foo = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index 34e9f4d7e6eaa..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.active_non_terminal.v3"; -option java_outer_classname = "ActiveNonTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveNonTerminal { - option (udpa.annotations.versioning).previous_message_type = - "envoy.active_non_terminal.v2.ActiveNonTerminal"; - - int32 hidden_envoy_deprecated_foo = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold deleted file mode 100644 index 5d369aefd96df..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v3; - -import 
"udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.active_non_terminal.v3"; -option java_outer_classname = "ActiveNonTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveNonTerminal { - option (udpa.annotations.versioning).previous_message_type = - "envoy.active_non_terminal.v2.ActiveNonTerminal"; - - reserved 1; - - reserved "foo"; - - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD deleted file mode 100644 index db6244be9a36f..0000000000000 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD +++ /dev/null @@ -1,10 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = ["active_terminal.proto"], - visibility = ["//visibility:public"], - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto deleted file mode 100644 index 1c5bdaca36832..0000000000000 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package envoy.active_terminal.v2; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveTerminal { - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold deleted file mode 100644 index 5e49be1e63b4e..0000000000000 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold +++ 
/dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package envoy.active_terminal.v2; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.active_terminal.v2"; -option java_outer_classname = "ActiveTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveTerminal { - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v2/BUILD b/tools/testdata/protoxform/envoy/frozen/v2/BUILD deleted file mode 100644 index a8556bddea134..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = [ - "frozen.proto", - "frozen_versioned_deprecation.proto", - ], - visibility = ["//visibility:public"], - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@envoy_api//envoy/annotations:pkg", - ], -) diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto deleted file mode 100644 index defe7ff3eac40..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "udpa/annotations/status.proto"; - 
-option (udpa.annotations.file_status).package_version_status = FROZEN; - -message Frozen { - int32 foo = 1; - int32 bar = 2 [deprecated = true]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold deleted file mode 100644 index 5086376ee4354..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v2"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message Frozen { - int32 foo = 1; - - int32 bar = 2 [deprecated = true]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index f67c7f33a3783..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Frozen { - option (udpa.annotations.versioning).previous_message_type = "envoy.frozen.v2.Frozen"; - - int32 foo = 1; - - int32 hidden_envoy_deprecated_bar = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; -} diff --git 
a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold deleted file mode 100644 index 7c10c1313b27c..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Frozen { - option (udpa.annotations.versioning).previous_message_type = "envoy.frozen.v2.Frozen"; - - reserved 2; - - reserved "bar"; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto deleted file mode 100644 index bf5e1efa73014..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message FrozenVersionedDeprecation { - int32 foo = 1; - int32 bar = 2 [deprecated = true]; - int32 baz = 3 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold deleted file mode 100644 index 521f4c3eda9a9..0000000000000 --- 
a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v2"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message FrozenVersionedDeprecation { - int32 foo = 1; - - int32 bar = 2 [deprecated = true]; - - int32 baz = 3 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e82dc9bb6cee9..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message FrozenVersionedDeprecation { - option (udpa.annotations.versioning).previous_message_type = - "envoy.frozen.v2.FrozenVersionedDeprecation"; - - int32 foo = 1; - - int32 hidden_envoy_deprecated_bar = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - int32 hidden_envoy_deprecated_baz = 3 - [deprecated = true, 
(envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold deleted file mode 100644 index fa99251cf2793..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message FrozenVersionedDeprecation { - option (udpa.annotations.versioning).previous_message_type = - "envoy.frozen.v2.FrozenVersionedDeprecation"; - - reserved 2, 3; - - reserved "bar", "baz"; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/BUILD b/tools/testdata/protoxform/envoy/frozen/v3/BUILD deleted file mode 100644 index 39fb4eabe7238..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = [ - "frozen.proto", - "frozen_versioned_deprecation.proto", - ], - visibility = ["//visibility:public"], - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto deleted file mode 100644 index 0e09acf92fe69..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -package 
envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message Frozen { - int32 foo = 1; - reserved 2; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold deleted file mode 100644 index 23740e54e11f3..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message Frozen { - reserved 2; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto deleted file mode 100644 index e95fc725c6b7a..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message FrozenVersionedDeprecation { - int32 foo = 1; - 
reserved 2, 3; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold deleted file mode 100644 index d280f1f632be2..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message FrozenVersionedDeprecation { - reserved 2, 3; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/v2/BUILD b/tools/testdata/protoxform/envoy/v2/BUILD index 18cca27da4c6c..1a3bcb6e90b38 100644 --- a/tools/testdata/protoxform/envoy/v2/BUILD +++ b/tools/testdata/protoxform/envoy/v2/BUILD @@ -19,18 +19,3 @@ proto_library( "@envoy_api//envoy/api/v2:pkg", ], ) - -proto_library( - name = "freeze_protos", - srcs = [ - "active_non_terminal.proto", - "active_terminal.proto", - "frozen.proto", - ], - visibility = ["//visibility:public"], - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - 
"@envoy_api//envoy/annotations:pkg", - "@envoy_api//envoy/api/v2:pkg", - ], -) diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index cd6b36941d926..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "DiscoveryServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -service SomeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.v3.SomeResource"; - - rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:some"; - option (google.api.http).body = "*"; - } -} - -message SomeResource { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.SomeResource"; - - string bar = 1; -} diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold deleted file mode 100644 index cd6b36941d926..0000000000000 --- 
a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "DiscoveryServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -service SomeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.v3.SomeResource"; - - rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:some"; - option (google.api.http).body = "*"; - } -} - -message SomeResource { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.SomeResource"; - - string bar = 1; -} diff --git a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index fe6bb1585b87d..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.external.v3; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "tools/testdata/protoxform/external/package_type.proto"; -import 
"tools/testdata/protoxform/external/root_type.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.external.v3"; -option java_outer_classname = "FullyQualifiedNamesProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// Verifies normalization of fully-qualified type names. -// [#next-free-field: 8] -message UsesFullyQualifiedTypeNames { - option (udpa.annotations.versioning).previous_message_type = - "envoy.v2.UsesFullyQualifiedTypeNames"; - - api.v2.core.Locality another_envoy_type = 1; - - api.v2.core.Locality another_envoy_type_fqn = 2; - - google.protobuf.Any google_protobuf_any = 3; - - google.protobuf.Any google_protobuf_any_fqn = 4; - - .external.PackageLevelType external_package_level_type = 5; - - .external.PackageLevelType external_package_level_type_fqn = 6; - - .RootLevelType external_root_level_type_fqn = 7; -} diff --git a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold deleted file mode 100644 index fe6bb1585b87d..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.external.v3; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "tools/testdata/protoxform/external/package_type.proto"; -import "tools/testdata/protoxform/external/root_type.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.external.v3"; -option java_outer_classname = "FullyQualifiedNamesProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; - -// Verifies normalization of fully-qualified type names. -// [#next-free-field: 8] -message UsesFullyQualifiedTypeNames { - option (udpa.annotations.versioning).previous_message_type = - "envoy.v2.UsesFullyQualifiedTypeNames"; - - api.v2.core.Locality another_envoy_type = 1; - - api.v2.core.Locality another_envoy_type_fqn = 2; - - google.protobuf.Any google_protobuf_any = 3; - - google.protobuf.Any google_protobuf_any_fqn = 4; - - .external.PackageLevelType external_package_level_type = 5; - - .external.PackageLevelType external_package_level_type_fqn = 6; - - .RootLevelType external_root_level_type_fqn = 7; -} diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index acd2fee1a9621..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "OneofProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message OneofExample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.OneofExample"; - - oneof baz_specifier { - string foo = 1; - } - - oneof bar_specifier { - string bar = 2; - - string blah = 3; - } -} diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold deleted file mode 100644 index acd2fee1a9621..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,25 +0,0 @@ 
-syntax = "proto3"; - -package envoy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "OneofProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message OneofExample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.OneofExample"; - - oneof baz_specifier { - string foo = 1; - } - - oneof bar_specifier { - string bar = 2; - - string blah = 3; - } -} diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e7aaa8085a3e5..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.foo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.foo.v3"; -option java_outer_classname = "PackageMoveProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Package { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package.Entry"; - - string key = 1; - - string value = 2; - } - - repeated Entry entries = 1; -} diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold deleted file mode 100644 index e7aaa8085a3e5..0000000000000 --- 
a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.foo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.foo.v3"; -option java_outer_classname = "PackageMoveProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Package { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package.Entry"; - - string key = 1; - - string value = 2; - } - - repeated Entry entries = 1; -} diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index 46cf693236ef9..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "SampleProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -enum SomeEnum { - hidden_envoy_deprecated_DEFAULT = 0 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.7"]; - FOO = 1; - hidden_envoy_deprecated_BAR = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.7"]; - WOW = 3; - hidden_envoy_deprecated_OLD = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.6"]; - 
hidden_envoy_deprecated_DEP = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.7"]; - hidden_envoy_deprecated_VERY_OLD = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.5"]; -} - -// [#next-free-field: 7] -message Sample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample"; - - enum DeprecateEnum { - option deprecated = true; - - FIRST = 0; - SECOND = 1; - } - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample.Entry"; - - string key = 1; - - string value = 2; - } - - repeated Entry entries = 1; - - string hidden_envoy_deprecated_will_deprecated = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - string renamed_component = 3; - - string hidden_envoy_deprecated_old_deprecated = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.6"]; - - string hidden_envoy_deprecated_new_deprecated = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - string hidden_envoy_deprecated_very_old_deprecated = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold deleted file mode 100644 index 0c07d7a04cd06..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "SampleProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -enum SomeEnum { - reserved 2, 4, 5, 6; - - reserved "BAR", 
"OLD", "DEP", "VERY_OLD"; - - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [deprecated = true]; - FOO = 1; - WOW = 3; -} - -// [#next-free-field: 7] -message Sample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample.Entry"; - - string key = 1; - - string value = 2; - } - - reserved 2, 4, 5, 6; - - reserved "will_deprecated", "old_deprecated", "new_deprecated", "very_old_deprecated"; - - repeated Entry entries = 1; - - string renamed_component = 3; -} diff --git a/tools/testing/BUILD b/tools/testing/BUILD index 35b9cf843a286..2ddf8fe5b1435 100644 --- a/tools/testing/BUILD +++ b/tools/testing/BUILD @@ -25,8 +25,8 @@ envoy_py_binary( requirement("pytest-asyncio"), requirement("pytest-cov"), requirement("pytest-patches"), - "//tools/base:runner", - "//tools/base:utils", + requirement("envoy.base.runner"), + requirement("envoy.base.utils"), ], ) @@ -35,18 +35,18 @@ envoy_py_binary( data = [ ":plugin", "//:.coveragerc", - "//tools/base:runner", - "//tools/base:utils", ], deps = [ requirement("coverage"), + requirement("envoy.base.runner"), + requirement("envoy.base.utils"), ], ) envoy_py_binary( name = "tools.testing.all_pytests", deps = [ - "//tools/base:checker", - "//tools/base:utils", + requirement("envoy.base.checker"), + requirement("envoy.base.utils"), ], ) diff --git a/tools/testing/all_pytests.py b/tools/testing/all_pytests.py index 4225add80659a..ea4749763b48d 100644 --- a/tools/testing/all_pytests.py +++ b/tools/testing/all_pytests.py @@ -12,7 +12,7 @@ from functools import cached_property from typing import Optional -from tools.base import checker, runner +from envoy.base import checker, runner class PytestChecker(checker.BazelChecker): diff --git a/tools/testing/python_coverage.py b/tools/testing/python_coverage.py index 6d1a6c9aa00e5..109be0183cc17 100755 --- a/tools/testing/python_coverage.py +++ 
b/tools/testing/python_coverage.py @@ -17,7 +17,7 @@ from coverage import cmdline # type:ignore -from tools.base import runner, utils +from envoy.base import runner, utils class CoverageRunner(runner.Runner): diff --git a/tools/testing/python_pytest.py b/tools/testing/python_pytest.py index 37cbe96eeb521..de9fd1ad41561 100755 --- a/tools/testing/python_pytest.py +++ b/tools/testing/python_pytest.py @@ -16,7 +16,7 @@ import pytest -from tools.base import runner, utils +from envoy.base import runner, utils class PytestRunner(runner.Runner): diff --git a/tools/testing/tests/test_all_pytests.py b/tools/testing/tests/test_all_pytests.py index 453d80247af2a..bdb36872782cc 100644 --- a/tools/testing/tests/test_all_pytests.py +++ b/tools/testing/tests/test_all_pytests.py @@ -3,7 +3,7 @@ import pytest -from tools.base.runner import BazelRunError +from envoy.base.runner import BazelRunError from tools.testing import all_pytests diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index e7d9f455e7fbe..37eead8dc9066 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -56,7 +56,7 @@ py_binary( label_flag( name = "api_type_db_target", - build_setting_default = "@envoy_api_canonical//versioning:active_protos", + build_setting_default = "@envoy_api//versioning:active_protos", visibility = ["//visibility:public"], ) @@ -68,14 +68,14 @@ type_database( file_descriptor_set_text( name = "all_protos_pb_text", - deps = ["@envoy_api_canonical//:all_protos"], + deps = ["@envoy_api//:all_protos"], ) file_descriptor_set_text( name = "all_protos_with_ext_pb_text", with_external_deps = True, deps = [ - "@envoy_api_canonical//:all_protos", + "@envoy_api//:all_protos", ], ) diff --git a/tools/type_whisperer/file_descriptor_set_text.bzl b/tools/type_whisperer/file_descriptor_set_text.bzl index 18a5c2e720503..1ae43200956a3 100644 --- a/tools/type_whisperer/file_descriptor_set_text.bzl +++ b/tools/type_whisperer/file_descriptor_set_text.bzl @@ -29,7 +29,7 @@ 
file_descriptor_set_text = rule( doc = "List of all proto_library deps to be included.", ), "proto_repositories": attr.string_list( - default = ["envoy_api_canonical"], + default = ["envoy_api"], allow_empty = False, ), "with_external_deps": attr.bool( diff --git a/tools/type_whisperer/proto_cc_source.bzl b/tools/type_whisperer/proto_cc_source.bzl index 9c4522a502c6a..de6a18c5a5da6 100644 --- a/tools/type_whisperer/proto_cc_source.bzl +++ b/tools/type_whisperer/proto_cc_source.bzl @@ -24,7 +24,7 @@ proto_cc_source = rule( doc = "List of all text protos to be included.", ), "proto_repositories": attr.string_list( - default = ["envoy_api_canonical"], + default = ["envoy_api"], allow_empty = False, ), "_proto_cc_source_gen": attr.label( diff --git a/tools/type_whisperer/type_database.bzl b/tools/type_whisperer/type_database.bzl index 76c75c440ef40..bc0e63a1c2427 100644 --- a/tools/type_whisperer/type_database.bzl +++ b/tools/type_whisperer/type_database.bzl @@ -28,7 +28,7 @@ type_database = rule( doc = "List of all proto_library target to be included.", ), "proto_repositories": attr.string_list( - default = ["envoy_api_canonical"], + default = ["envoy_api"], allow_empty = False, ), "_type_db_gen": attr.label( diff --git a/tools/type_whisperer/typedb_gen.py b/tools/type_whisperer/typedb_gen.py index b89e3efdcbde1..5d418c3355912 100644 --- a/tools/type_whisperer/typedb_gen.py +++ b/tools/type_whisperer/typedb_gen.py @@ -10,6 +10,8 @@ from tools.type_whisperer.api_type_db_pb2 import TypeDb from tools.type_whisperer.types_pb2 import Types, TypeDescription +# TODO(htuch): cleanup this file, remove type upgrade, simplify. + # Regexes governing v3upgrades. TODO(htuch): The regex approach will have # to be rethought as we go beyond v3, this is WiP. 
TYPE_UPGRADE_REGEXES = [ @@ -174,16 +176,6 @@ def next_version_upgrade(type_name, type_map, next_version_upgrade_memo, visited type_desc = type_db.types[t] type_desc.qualified_package = type_map[t].qualified_package type_desc.proto_path = type_map[t].proto_path - if type_desc.qualified_package in next_versions_pkgs: - type_desc.next_version_type_name = upgraded_type(t, type_map[t]) - assert (type_desc.next_version_type_name != t) - next_proto_info[type_map[t].proto_path] = ( - type_map[type_desc.next_version_type_name].proto_path, - type_map[type_desc.next_version_type_name].qualified_package) - for proto_path, (next_proto_path, next_package) in sorted(next_proto_info.items()): - if not next_package.endswith('.v4alpha'): - type_db.next_version_protos[proto_path].proto_path = next_proto_path - type_db.next_version_protos[proto_path].qualified_package = next_package # Write out proto text. with open(out_path, 'w') as f: