diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml
index 6bc20230bafe7..c16691ca580e4 100644
--- a/.azure-pipelines/pipelines.yml
+++ b/.azure-pipelines/pipelines.yml
@@ -244,8 +244,9 @@ stages:
matrix:
api:
CI_TARGET: "bazel.api"
- api_compat:
- CI_TARGET: "bazel.api_compat"
+ # Disabled due to https://github.com/envoyproxy/envoy/pull/18218
+ # api_compat:
+ # CI_TARGET: "bazel.api_compat"
gcc:
CI_TARGET: "bazel.gcc"
clang_tidy:
@@ -381,6 +382,15 @@ stages:
GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
displayName: "Generate docs"
+ - script: |
+ ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs'
+ displayName: "Upload Docs to GCS"
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket)
+ condition: eq(variables['Build.SourceBranch'], 'refs/heads/main')
+
- task: InstallSSHKey@0
inputs:
hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
@@ -393,6 +403,7 @@ stages:
workingDirectory: $(Build.SourcesDirectory)
env:
AZP_BRANCH: $(Build.SourceBranch)
+ NETLIFY_TRIGGER_URL: $(NetlifyTriggerURL)
- stage: verify
dependsOn: ["docker"]
@@ -454,7 +465,7 @@ stages:
testRunTitle: "macOS"
condition: always()
- - script: ./ci/flaky_test/run_process_xml.sh
+ - script: bazel run //ci/flaky_test:process_xml
displayName: "Process Test Results"
env:
TEST_TMPDIR: $(Build.SourcesDirectory)
@@ -501,46 +512,23 @@ stages:
artifactName: windows.release
condition: always()
- - job: clang_cl
- timeoutInMinutes: 120
- pool:
- vmImage: "windows-latest"
- steps:
- - task: Cache@2
- inputs:
- key: '"windows.release" | ./WORKSPACE | **/*.bzl'
- path: $(Build.StagingDirectory)/repository_cache
- continueOnError: true
- - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh
- displayName: "Run Windows clang-cl CI"
- env:
- CI_TARGET: "windows"
- ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)"
- SLACK_TOKEN: $(SLACK_TOKEN)
- REPO_URI: $(Build.Repository.Uri)
- BUILD_URI: $(Build.BuildUri)
- ENVOY_RBE: "true"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-clang-cl --jobs=$(RbeJobs) --flaky_test_attempts=2"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
- - task: PublishTestResults@2
- inputs:
- testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml"
- testRunTitle: "clang-cl"
- searchFolder: $(Build.StagingDirectory)/tmp
- condition: always()
- - task: PublishBuildArtifacts@1
- inputs:
- pathtoPublish: "$(Build.StagingDirectory)/envoy"
- artifactName: windows.clang-cl
- condition: always()
-
- job: docker
+ strategy:
+ matrix:
+ windows2019:
+ imageName: 'windows-latest'
+ windowsBuildType: "windows"
+ windowsImageBase: "mcr.microsoft.com/windows/servercore"
+ windowsImageTag: "ltsc2019"
+ windows2022:
+ imageName: 'windows-2022'
+ windowsBuildType: "windows-ltsc2022"
+ windowsImageBase: "mcr.microsoft.com/windows/nanoserver"
+ windowsImageTag: "ltsc2022"
dependsOn: ["release"]
timeoutInMinutes: 120
pool:
- vmImage: "windows-latest"
+ vmImage: $(imageName)
steps:
- task: DownloadBuildArtifacts@0
inputs:
@@ -561,6 +549,9 @@ stages:
AZP_SHA1: $(Build.SourceVersion)
DOCKERHUB_USERNAME: $(DockerUsername)
DOCKERHUB_PASSWORD: $(DockerPassword)
+ WINDOWS_BUILD_TYPE: $(windowsBuildType)
+ WINDOWS_IMAGE_BASE: $(windowsImageBase)
+ WINDOWS_IMAGE_TAG: $(windowsImageTag)
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: "$(Build.StagingDirectory)/build_images"
diff --git a/.bazelrc b/.bazelrc
index aa0bd78598253..d71b1261bd08c 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -167,17 +167,17 @@ build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh
build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build:rbe-toolchain-clang --config=rbe-toolchain
-build:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform
-build:rbe-toolchain-clang --host_platform=@rbe_ubuntu_clang//config:platform
-build:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
-build:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
+build:rbe-toolchain-clang --platforms=@envoy_build_tools//toolchains:rbe_linux_clang_platform
+build:rbe-toolchain-clang --host_platform=@envoy_build_tools//toolchains:rbe_linux_clang_platform
+build:rbe-toolchain-clang --crosstool_top=@envoy_build_tools//toolchains/configs/linux/clang/cc:toolchain
+build:rbe-toolchain-clang --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/clang/config:cc-toolchain
build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin
build:rbe-toolchain-clang-libc++ --config=rbe-toolchain
-build:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform
-build:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform
-build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain
-build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain
+build:rbe-toolchain-clang-libc++ --platforms=@envoy_build_tools//toolchains:rbe_linux_clang_libcxx_platform
+build:rbe-toolchain-clang-libc++ --host_platform=@envoy_build_tools//toolchains:rbe_linux_clang_libcxx_platform
+build:rbe-toolchain-clang-libc++ --crosstool_top=@envoy_build_tools//toolchains/configs/linux/clang_libcxx/cc:toolchain
+build:rbe-toolchain-clang-libc++ --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/clang_libcxx/config:cc-toolchain
build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin
build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++
build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++
@@ -202,20 +202,20 @@ build:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib
build:rbe-toolchain-tsan --config=clang-tsan
build:rbe-toolchain-gcc --config=rbe-toolchain
-build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform
-build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform
-build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain
-build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain
+build:rbe-toolchain-gcc --platforms=@envoy_build_tools//toolchains:rbe_linux_gcc_platform
+build:rbe-toolchain-gcc --host_platform=@envoy_build_tools//toolchains:rbe_linux_gcc_platform
+build:rbe-toolchain-gcc --crosstool_top=@envoy_build_tools//toolchains/configs/linux/gcc/cc:toolchain
+build:rbe-toolchain-gcc --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/gcc/config:cc-toolchain
-build:rbe-toolchain-msvc-cl --host_platform=@rbe_windows_msvc_cl//config:platform
-build:rbe-toolchain-msvc-cl --platforms=@rbe_windows_msvc_cl//config:platform
-build:rbe-toolchain-msvc-cl --crosstool_top=@rbe_windows_msvc_cl//cc:toolchain
-build:rbe-toolchain-msvc-cl --extra_toolchains=@rbe_windows_msvc_cl//config:cc-toolchain
+build:rbe-toolchain-msvc-cl --host_platform=@envoy_build_tools//toolchains:rbe_windows_msvc_cl_platform
+build:rbe-toolchain-msvc-cl --platforms=@envoy_build_tools//toolchains:rbe_windows_msvc_cl_platform
+build:rbe-toolchain-msvc-cl --crosstool_top=@envoy_build_tools//toolchains/configs/windows/msvc-cl/cc:toolchain
+build:rbe-toolchain-msvc-cl --extra_toolchains=@envoy_build_tools//toolchains/configs/windows/msvc-cl/config:cc-toolchain
-build:rbe-toolchain-clang-cl --host_platform=@rbe_windows_clang_cl//config:platform
-build:rbe-toolchain-clang-cl --platforms=@rbe_windows_clang_cl//config:platform
-build:rbe-toolchain-clang-cl --crosstool_top=@rbe_windows_clang_cl//cc:toolchain
-build:rbe-toolchain-clang-cl --extra_toolchains=@rbe_windows_clang_cl//config:cc-toolchain
+build:rbe-toolchain-clang-cl --host_platform=@envoy_build_tools//toolchains:rbe_windows_clang_cl_platform
+build:rbe-toolchain-clang-cl --platforms=@envoy_build_tools//toolchains:rbe_windows_clang_cl_platform
+build:rbe-toolchain-clang-cl --crosstool_top=@envoy_build_tools//toolchains/configs/windows/clang-cl/cc:toolchain
+build:rbe-toolchain-clang-cl --extra_toolchains=@envoy_build_tools//toolchains/configs/windows/clang-cl/config:cc-toolchain
build:remote --spawn_strategy=remote,sandboxed,local
build:remote --strategy=Javac=remote,sandboxed,local
@@ -265,7 +265,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl
# Docker sandbox
# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8
-build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:55d9e4719d2bd0accce8f829b44dab70cd42112a
+build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6
build:docker-sandbox --spawn_strategy=docker
build:docker-sandbox --strategy=Javac=docker
build:docker-sandbox --strategy=Closure=docker
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 378a45b4f1af2..8da8671498066 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,4 +1,4 @@
-FROM gcr.io/envoy-ci/envoy-build:55d9e4719d2bd0accce8f829b44dab70cd42112a
+FROM gcr.io/envoy-ci/envoy-build:81a93046060dbe5620d5b3aa92632090a9ee4da6
ARG USERNAME=vscode
ARG USER_UID=501
diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py
index 9c6277ddac81d..de2aba2f6c2b3 100644
--- a/.github/actions/pr_notifier/pr_notifier.py
+++ b/.github/actions/pr_notifier/pr_notifier.py
@@ -69,6 +69,10 @@ def is_waiting(labels):
return False
+def is_contrib(labels):
+ return any(label.name == "contrib" for label in labels)
+
+
# Return true if the PR has an API tag, false otherwise.
def is_api(labels):
for label in labels:
@@ -174,7 +178,7 @@ def track_prs():
pr_info.assignees, maintainers_and_prs, message, MAINTAINERS, FIRST_PASS)
# If there was no maintainer, track it as unassigned.
- if not has_maintainer_assignee:
+ if not has_maintainer_assignee and not is_contrib(labels):
maintainers_and_prs['unassigned'] = maintainers_and_prs['unassigned'] + message
# Return the dict of {maintainers : PR notifications},
diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt
index 2fa1aad74b299..fb21f429db9fa 100644
--- a/.github/actions/pr_notifier/requirements.txt
+++ b/.github/actions/pr_notifier/requirements.txt
@@ -63,9 +63,9 @@ chardet==4.0.0 \
--hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \
--hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5
# via requests
-deprecated==1.2.12 \
- --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \
- --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1
+deprecated==1.2.13 \
+ --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \
+ --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d
# via pygithub
idna==2.10 \
--hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
@@ -78,7 +78,7 @@ pycparser==2.20 \
pygithub==1.55 \
--hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \
--hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b
- # via -r .github/actions/pr_notifier/requirements.txt
+ # via -r requirements.in
pyjwt==2.1.0 \
--hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \
--hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130
@@ -111,10 +111,10 @@ six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
--hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
# via pynacl
-slack-sdk==3.10.1 \
- --hash=sha256:f17b71a578e94204d9033bffded634475f4ca0a6274c6c7a4fd8a9cb0ac7cd8b \
- --hash=sha256:2b4dde7728eb4ff5a581025d204578ccff25a5d8f0fe11ae175e3ce6e074434f
- # via -r .github/actions/pr_notifier/requirements.txt
+slack_sdk==3.11.2 \
+ --hash=sha256:131bf605894525c2d66da064677eabc19f53f02ce0f82a3f2fa130d4ec3bc1b0 \
+ --hash=sha256:35245ec34c8549fbb5c43ccc17101afd725b3508bb784da46530b214f496bf93
+ # via -r requirements.in
urllib3==1.26.6 \
--hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \
--hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 4aeb246c9db95..4d2c6b1592534 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -36,21 +36,6 @@ updates:
schedule:
interval: "daily"
-- package-ecosystem: "pip"
- directory: "/tools/deprecate_features"
- schedule:
- interval: "daily"
-
-- package-ecosystem: "pip"
- directory: "/tools/deprecate_version"
- schedule:
- interval: "daily"
-
-- package-ecosystem: "pip"
- directory: "/ci/flaky_test"
- schedule:
- interval: "daily"
-
- package-ecosystem: "docker"
directory: "/ci"
schedule:
diff --git a/.github/workflows/check-deps.yml b/.github/workflows/check-deps.yml
new file mode 100644
index 0000000000000..48444f2578101
--- /dev/null
+++ b/.github/workflows/check-deps.yml
@@ -0,0 +1,34 @@
+name: Check for latest_release of deps
+
+on :
+ schedule :
+ - cron : '0 8 * * *'
+
+ workflow_dispatch :
+
+jobs :
+ build :
+ runs-on : ubuntu-latest
+ steps :
+ - name : checkout
+ uses : actions/checkout@v2
+ with :
+ ref : ${{ github.head_ref }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.9'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install virtualenv
+
+ - name: setting up virtualenv
+ run : |
+ export GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}
+ # --create_issues flag to create issue only in github action
+ # and not interfere with the CI
+ ./tools/dependency/release_dates.sh ./bazel/repository_locations.bzl --create_issues
+ ./tools/dependency/release_dates.sh ./api/bazel/repository_locations.bzl --create_issues
diff --git a/BUILD b/BUILD
index 9e35562c085fb..747d512e7e9f4 100644
--- a/BUILD
+++ b/BUILD
@@ -8,6 +8,11 @@ exports_files([
".coveragerc",
])
+alias(
+ name = "envoy",
+ actual = "//source/exe:envoy",
+)
+
# These two definitions exist to help reduce Envoy upstream core code depending on extensions.
# To avoid visibility problems, see notes in source/extensions/extensions_build_config.bzl
#
diff --git a/CODEOWNERS b/CODEOWNERS
index 1cb3aeacdede5..7e2d7dda5390d 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -101,8 +101,6 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/wasm_runtime/ @PiotrSikora @mathetake @lizan
# common matcher
/*/extensions/common/matcher @mattklein123 @yangminzhu
-# common crypto extension
-/*/extensions/common/crypto @lizan @bdecoste @asraa
/*/extensions/common/proxy_protocol @alyssawilk @wez470
/*/extensions/filters/http/grpc_http1_bridge @snowp @jose
/*/extensions/filters/http/gzip @gsagula @dio
@@ -110,6 +108,7 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/filters/common/fault @rshriram @alyssawilk
/*/extensions/filters/http/grpc_json_transcoder @qiwzhang @lizan
/*/extensions/filters/http/router @alyssawilk @mattklein123 @snowp
+/*/extensions/filters/common/rbac/matchers @conqerAtapple @ggreenway @alyssawilk
/*/extensions/filters/http/grpc_web @fengli79 @lizan
/*/extensions/filters/http/grpc_stats @kyessenov @lizan
/*/extensions/filters/common/original_src @klarose @snowp
@@ -203,3 +202,5 @@ extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp
/contrib/mysql_proxy/ @rshriram @venilnoronha
/contrib/postgres_proxy/ @fabriziomello @cpakulski @dio
/contrib/sxg/ @cpapazian @rgs1 @alyssawilk
+/contrib/sip_proxy/ @durd07 @nearbyfly @dorisd0102
+/contrib/cryptomb/ @rojkov @ipuustin
diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md
index 7ef47bcd6cf13..2efa3f6ddf9c7 100644
--- a/EXTENSION_POLICY.md
+++ b/EXTENSION_POLICY.md
@@ -92,8 +92,8 @@ The `status` is one of:
The extension status may be adjusted by the extension [CODEOWNERS](./CODEOWNERS) and/or Envoy
maintainers based on an assessment of the above criteria. Note that the status of the extension
reflects the implementation status. It is orthogonal to the API stability, for example, an extension
-with configuration `envoy.foo.v3alpha.Bar` might have a `stable` implementation and
-`envoy.foo.v3.Baz` can have a `wip` implementation.
+API marked with `(xds.annotations.v3.file_status).work_in_progress` might have a `stable` implementation and
+an extension with a stable config proto can have a `wip` implementation.
The `security_posture` is one of:
* `robust_to_untrusted_downstream`: The extension is hardened against untrusted downstream traffic. It
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index 13342260c7bc1..53659efb67346 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -98,7 +98,8 @@ or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.co
* From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch
from the tagged release, e.g. "release/v1.6". It will be used for the
[stable releases](RELEASES.md#stable-releases).
-* Monitor the AZP tag build to make sure that the final docker images get pushed along with
+* Tagging will kick off another run of [AZP postsubmit](https://dev.azure.com/cncf/envoy/_build?definitionId=11). Monitor that
+ tag build to make sure that the final docker images get pushed along with
the final docs. The final documentation will end up in the
[envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/main/docs/envoy).
* Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release.
@@ -140,7 +141,7 @@ New Features
Deprecated
----------
```
-* Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`)
+* Run the deprecate_versions.py script (e.g. `bazel run //tools/deprecate_version:deprecate_version`)
to file tracking issues for runtime guarded code which can be removed.
* Check source/common/runtime/runtime_features.cc and see if any runtime guards in
disabled_runtime_features should be reassessed, and ping on the relevant issues.
diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md
index 97efd55d01fe0..7fb18f6497f9c 100644
--- a/PULL_REQUESTS.md
+++ b/PULL_REQUESTS.md
@@ -102,6 +102,19 @@ you may instead just tag the PR with the issue:
\#Issue
+### Commit
+
+If this PR fixes or reverts a buggy commit, please add a line of the form:
+
+Fixes commit #PR
+
+or
+
+Fixes commit SHA
+
+This will allow automated tools to detect tainted commit ranges on the main branch when the PR is
+merged.
+
### Deprecated
If this PR deprecates existing Envoy APIs or code, it should include an update to the deprecated
diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md
index 73e13c82eea39..27f0f9ef9df8a 100644
--- a/PULL_REQUEST_TEMPLATE.md
+++ b/PULL_REQUEST_TEMPLATE.md
@@ -21,5 +21,6 @@ Release Notes:
Platform Specific Features:
[Optional Runtime guard:]
[Optional Fixes #Issue]
+[Optional Fixes commit #PR or SHA]
[Optional Deprecated:]
[Optional [API Considerations](https://github.com/envoyproxy/envoy/blob/main/api/review_checklist.md):]
diff --git a/RELEASES.md b/RELEASES.md
index c72eeb63805a8..1619b0d22d729 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -72,6 +72,7 @@ deadline of 3 weeks.
| 1.17.0 | 2020/12/31 | 2021/01/11 | +11 days | 2022/01/11 |
| 1.18.0 | 2021/03/31 | 2021/04/15 | +15 days | 2022/04/15 |
| 1.19.0 | 2021/06/30 | 2021/07/13 | +13 days | 2022/07/13 |
-| 1.20.0 | 2021/09/30 | | | |
+| 1.20.0 | 2021/09/30 | 2021/10/05 | +5 days | 2022/10/13 |
+| 1.21.0 | 2021/12/30 | | | |
[repokitteh]: https://github.com/repokitteh
diff --git a/VERSION b/VERSION
index 734375f897d07..c6ba48dc6375f 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.20.0-dev
+1.21.0-dev
diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md
index f864619602a1d..49e1bae1ec242 100644
--- a/api/API_VERSIONING.md
+++ b/api/API_VERSIONING.md
@@ -72,8 +72,10 @@ An exception to the above policy exists for:
or message has not been included in an Envoy release.
* API versions tagged `vNalpha`. Within an alpha major version, arbitrary breaking changes are allowed.
* Any field, message or enum with a `[#not-implemented-hide:..` comment.
-* Any proto with a `(udpa.annotations.file_status).work_in_progress` option annotation.
-* Any proto marked as [#alpha:].
+* Any proto with a `(udpa.annotations.file_status).work_in_progress`,
+ `(xds.annotations.v3.file_status).work_in_progress`,
+ `(xds.annotations.v3.message_status).work_in_progress`, or
+ `(xds.annotations.v3.field_status).work_in_progress` option annotation.
Note that changes to default values for wrapped types, e.g. `google.protobuf.UInt32Value` are not
governed by the above policy. Any management server requiring stability across Envoy API or
diff --git a/api/BUILD b/api/BUILD
index 93f9184a2b400..d8cffd4a48f78 100644
--- a/api/BUILD
+++ b/api/BUILD
@@ -64,10 +64,14 @@ proto_library(
"//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg",
"//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg",
+ "//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg",
+ "//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg",
+ "//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg",
"//envoy/admin/v3:pkg",
"//envoy/config/accesslog/v3:pkg",
"//envoy/config/bootstrap/v3:pkg",
"//envoy/config/cluster/v3:pkg",
+ "//envoy/config/common/key_value/v3:pkg",
"//envoy/config/common/matcher/v3:pkg",
"//envoy/config/core/v3:pkg",
"//envoy/config/endpoint/v3:pkg",
@@ -93,15 +97,14 @@ proto_library(
"//envoy/data/tap/v3:pkg",
"//envoy/extensions/access_loggers/file/v3:pkg",
"//envoy/extensions/access_loggers/grpc/v3:pkg",
- "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg",
+ "//envoy/extensions/access_loggers/open_telemetry/v3:pkg",
"//envoy/extensions/access_loggers/stream/v3:pkg",
"//envoy/extensions/access_loggers/wasm/v3:pkg",
- "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg",
+ "//envoy/extensions/cache/simple_http_cache/v3:pkg",
"//envoy/extensions/clusters/aggregate/v3:pkg",
"//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg",
"//envoy/extensions/clusters/redis/v3:pkg",
"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg",
- "//envoy/extensions/common/key_value/v3:pkg",
"//envoy/extensions/common/matching/v3:pkg",
"//envoy/extensions/common/ratelimit/v3:pkg",
"//envoy/extensions/common/tap/v3:pkg",
@@ -113,14 +116,14 @@ proto_library(
"//envoy/extensions/filters/common/fault/v3:pkg",
"//envoy/extensions/filters/common/matcher/action/v3:pkg",
"//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg",
- "//envoy/extensions/filters/http/admission_control/v3alpha:pkg",
+ "//envoy/extensions/filters/http/admission_control/v3:pkg",
"//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg",
"//envoy/extensions/filters/http/aws_lambda/v3:pkg",
"//envoy/extensions/filters/http/aws_request_signing/v3:pkg",
- "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg",
+ "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg",
"//envoy/extensions/filters/http/buffer/v3:pkg",
- "//envoy/extensions/filters/http/cache/v3alpha:pkg",
- "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg",
+ "//envoy/extensions/filters/http/cache/v3:pkg",
+ "//envoy/extensions/filters/http/cdn_loop/v3:pkg",
"//envoy/extensions/filters/http/composite/v3:pkg",
"//envoy/extensions/filters/http/compressor/v3:pkg",
"//envoy/extensions/filters/http/cors/v3:pkg",
@@ -129,7 +132,7 @@ proto_library(
"//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg",
"//envoy/extensions/filters/http/dynamo/v3:pkg",
"//envoy/extensions/filters/http/ext_authz/v3:pkg",
- "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg",
+ "//envoy/extensions/filters/http/ext_proc/v3:pkg",
"//envoy/extensions/filters/http/fault/v3:pkg",
"//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg",
"//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg",
@@ -144,7 +147,7 @@ proto_library(
"//envoy/extensions/filters/http/kill_request/v3:pkg",
"//envoy/extensions/filters/http/local_ratelimit/v3:pkg",
"//envoy/extensions/filters/http/lua/v3:pkg",
- "//envoy/extensions/filters/http/oauth2/v3alpha:pkg",
+ "//envoy/extensions/filters/http/oauth2/v3:pkg",
"//envoy/extensions/filters/http/on_demand/v3:pkg",
"//envoy/extensions/filters/http/original_src/v3:pkg",
"//envoy/extensions/filters/http/ratelimit/v3:pkg",
@@ -172,14 +175,14 @@ proto_library(
"//envoy/extensions/filters/network/rbac/v3:pkg",
"//envoy/extensions/filters/network/redis_proxy/v3:pkg",
"//envoy/extensions/filters/network/sni_cluster/v3:pkg",
- "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg",
+ "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg",
"//envoy/extensions/filters/network/tcp_proxy/v3:pkg",
"//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg",
"//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg",
"//envoy/extensions/filters/network/thrift_proxy/v3:pkg",
"//envoy/extensions/filters/network/wasm/v3:pkg",
"//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg",
- "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg",
+ "//envoy/extensions/filters/udp/dns_filter/v3:pkg",
"//envoy/extensions/filters/udp/udp_proxy/v3:pkg",
"//envoy/extensions/formatter/metadata/v3:pkg",
"//envoy/extensions/formatter/req_without_query/v3:pkg",
@@ -198,6 +201,7 @@ proto_library(
"//envoy/extensions/quic/crypto_stream/v3:pkg",
"//envoy/extensions/quic/proof_source/v3:pkg",
"//envoy/extensions/rate_limit_descriptors/expr/v3:pkg",
+ "//envoy/extensions/rbac/matchers/upstream_ip_port/v3:pkg",
"//envoy/extensions/request_id/uuid/v3:pkg",
"//envoy/extensions/resource_monitors/fixed_heap/v3:pkg",
"//envoy/extensions/resource_monitors/injected_resource/v3:pkg",
@@ -211,7 +215,7 @@ proto_library(
"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg",
"//envoy/extensions/transport_sockets/quic/v3:pkg",
"//envoy/extensions/transport_sockets/raw_buffer/v3:pkg",
- "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg",
+ "//envoy/extensions/transport_sockets/s2a/v3:pkg",
"//envoy/extensions/transport_sockets/starttls/v3:pkg",
"//envoy/extensions/transport_sockets/tap/v3:pkg",
"//envoy/extensions/transport_sockets/tls/v3:pkg",
@@ -221,14 +225,14 @@ proto_library(
"//envoy/extensions/upstreams/http/v3:pkg",
"//envoy/extensions/upstreams/tcp/generic/v3:pkg",
"//envoy/extensions/wasm/v3:pkg",
- "//envoy/extensions/watchdog/profile_action/v3alpha:pkg",
+ "//envoy/extensions/watchdog/profile_action/v3:pkg",
"//envoy/service/accesslog/v3:pkg",
"//envoy/service/auth/v3:pkg",
"//envoy/service/cluster/v3:pkg",
"//envoy/service/discovery/v3:pkg",
"//envoy/service/endpoint/v3:pkg",
"//envoy/service/event_reporting/v3:pkg",
- "//envoy/service/ext_proc/v3alpha:pkg",
+ "//envoy/service/ext_proc/v3:pkg",
"//envoy/service/extension/v3:pkg",
"//envoy/service/health/v3:pkg",
"//envoy/service/listener/v3:pkg",
@@ -246,7 +250,7 @@ proto_library(
"//envoy/type/metadata/v3:pkg",
"//envoy/type/tracing/v3:pkg",
"//envoy/type/v3:pkg",
- "//envoy/watchdog/v3alpha:pkg",
+ "//envoy/watchdog/v3:pkg",
],
)
diff --git a/api/STYLE.md b/api/STYLE.md
index b185be97c9687..5689d1162a08e 100644
--- a/api/STYLE.md
+++ b/api/STYLE.md
@@ -34,10 +34,13 @@ In addition, the following conventions should be followed:
implementation. These indicate that the entity is not implemented in Envoy and the entity
should be hidden from the Envoy documentation.
-* Use a `[#alpha:]` annotation in comments for messages that are considered alpha
- and are not subject to the threat model. This is similar to the work-in-progress/alpha tagging
- of extensions described below, but allows tagging messages that are used as part of the core API
- as alpha without having to break it into its own file.
+* Use a `(xds.annotations.v3.file_status).work_in_progress`,
+ `(xds.annotations.v3.message_status).work_in_progress`, or
+ `(xds.annotations.v3.field_status).work_in_progress` option annotation for files,
+ messages, or fields, respectively, that are considered work in progress and are not subject to the
+ threat model or the breaking change policy. This is similar to the work-in-progress/alpha tagging
+ of extensions described below, but allows tagging protos that are used as part of the core API
+ as work in progress without having to break them into their own file.
* Always use plural field names for `repeated` fields, such as `filters`.
@@ -144,9 +147,8 @@ To add an extension config to the API, the steps below should be followed:
(`option (udpa.annotations.file_status).package_version_status = ACTIVE;`).
This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD).
1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`.
-1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file,
- reformat `foobar.proto` as needed and also generate the shadow API protos.
-1. `git add api/ generated_api_shadow/` to add any new files to your Git index.
+1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file and
+ reformat `foobar.proto` as needed.
## API annotations
diff --git a/api/bazel/BUILD b/api/bazel/BUILD
index 0e5c8aea75b01..a8b7b161067fd 100644
--- a/api/bazel/BUILD
+++ b/api/bazel/BUILD
@@ -1,4 +1,6 @@
load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler")
+load("//:utils.bzl", "json_data")
+load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
licenses(["notice"]) # Apache 2
@@ -15,3 +17,8 @@ go_proto_compiler(
valid_archive = False,
visibility = ["//visibility:public"],
)
+
+json_data(
+ name = "repository_locations",
+ data = REPOSITORY_LOCATIONS_SPEC,
+)
diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl
index be1e9c9789e4b..0e1a19f1b8ddd 100644
--- a/api/bazel/repository_locations.bzl
+++ b/api/bazel/repository_locations.bzl
@@ -4,9 +4,9 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "bazel-skylib",
project_desc = "Common useful functions and rules for Bazel",
project_url = "https://github.com/bazelbuild/bazel-skylib",
- version = "1.0.3",
- sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
- release_date = "2020-08-27",
+ version = "1.1.1",
+ sha256 = "c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d",
+ release_date = "2021-09-27",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"],
use_category = ["api"],
),
@@ -32,9 +32,9 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Bazel build tools",
project_desc = "Developer tools for working with Google's bazel buildtool.",
project_url = "https://github.com/bazelbuild/buildtools",
- version = "4.0.1",
- sha256 = "c28eef4d30ba1a195c6837acf6c75a4034981f5b4002dda3c5aa6e48ce023cf1",
- release_date = "2021-03-01",
+ version = "4.2.2",
+ sha256 = "ae34c344514e08c23e90da0e2d6cb700fcd28e80c02e23e4d5715dddcb42f7b3",
+ release_date = "2021-10-07",
strip_prefix = "buildtools-{version}",
urls = ["https://github.com/bazelbuild/buildtools/archive/{version}.tar.gz"],
use_category = ["api"],
@@ -44,9 +44,9 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_desc = "xDS API Working Group (xDS-WG)",
project_url = "https://github.com/cncf/xds",
# During the UDPA -> xDS migration, we aren't working with releases.
- version = "dd25fe81a44506ab21ea666fb70b3b1c4bb183ee",
- sha256 = "9184235cd31272679e4c7f9232c341d4ea75351ded74d3fbba28b05c290bfa71",
- release_date = "2021-07-22",
+ version = "c0841ac0dd72f6d26903f7e68fa64bd038533ba5",
+ sha256 = "ddd12de0fab2356db6c353e2ae75a21d83712c869aeb0ec73b215ca3eba9ee77",
+ release_date = "2021-10-07",
strip_prefix = "xds-{version}",
urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"],
use_category = ["api"],
@@ -100,20 +100,20 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Protobuf Rules for Bazel",
project_desc = "Protocol buffer rules for Bazel",
project_url = "https://github.com/bazelbuild/rules_proto",
- version = "f7a30f6f80006b591fa7c437fe5a951eb10bcbcf",
- sha256 = "9fc210a34f0f9e7cc31598d109b5d069ef44911a82f507d5a88716db171615a8",
- release_date = "2021-02-09",
+ version = "4.0.0",
+ sha256 = "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1",
+ release_date = "2021-09-15",
strip_prefix = "rules_proto-{version}",
- urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"],
+ urls = ["https://github.com/bazelbuild/rules_proto/archive/refs/tags/{version}.tar.gz"],
use_category = ["api"],
),
opentelemetry_proto = dict(
project_name = "OpenTelemetry Proto",
project_desc = "Language Independent Interface Types For OpenTelemetry",
project_url = "https://github.com/open-telemetry/opentelemetry-proto",
- version = "0.9.0",
- sha256 = "9ec38ab51eedbd7601979b0eda962cf37bc8a4dc35fcef604801e463f01dcc00",
- release_date = "2021-05-12",
+ version = "0.11.0",
+ sha256 = "985367f8905e91018e636cbf0d83ab3f834b665c4f5899a27d10cae9657710e2",
+ release_date = "2021-10-07",
strip_prefix = "opentelemetry-proto-{version}",
urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"],
use_category = ["api"],
diff --git a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD
index ee92fb652582e..ec1e778e06e5c 100644
--- a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD
+++ b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD
@@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
licenses(["notice"]) # Apache 2
api_proto_package(
- deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+ deps = [
+ "@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
+ ],
)
diff --git a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto
index 03a6522852ab5..88fd46c3a8569 100644
--- a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto
+++ b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
package envoy.extensions.filters.network.kafka_mesh.v3alpha;
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_mesh.v3alpha";
option java_outer_classname = "KafkaMeshProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
+option (xds.annotations.v3.file_status).work_in_progress = true;
// [#protodoc-title: Kafka Mesh]
// Kafka Mesh :ref:`configuration overview `.
diff --git a/api/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/BUILD
similarity index 100%
rename from api/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD
rename to api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/BUILD
diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto b/api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.proto
similarity index 50%
rename from generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto
rename to api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.proto
index 5463ab6513bee..4b7accacf406f 100644
--- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto
+++ b/api/contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha/router.proto
@@ -1,17 +1,16 @@
syntax = "proto3";
-package envoy.config.filter.thrift.router.v2alpha1;
+package envoy.extensions.filters.network.sip_proxy.router.v3alpha;
import "udpa/annotations/status.proto";
-option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1";
+option java_package = "io.envoyproxy.envoy.extensions.filters.network.sip_proxy.router.v3alpha";
option java_outer_classname = "RouterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Router]
-// Thrift router :ref:`configuration overview `.
-// [#extension: envoy.filters.thrift.router]
+// [#extension: envoy.filters.sip.router]
message Router {
}
diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD
rename to api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/BUILD
diff --git a/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto
new file mode 100644
index 0000000000000..03c17a8ede82e
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.network.sip_proxy.v3alpha;
+
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.network.sip_proxy.v3alpha";
+option java_outer_classname = "RouteProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Sip Proxy Route Configuration]
+
+message RouteConfiguration {
+ // The name of the route configuration. Reserved for future use in asynchronous route discovery.
+ string name = 1;
+
+ // The list of routes that will be matched, in order, against incoming requests. The first route
+ // that matches will be used.
+ repeated Route routes = 2;
+}
+
+message Route {
+ // Route matching parameters.
+ RouteMatch match = 1 [(validate.rules).message = {required: true}];
+
+ // Route request to some upstream cluster.
+ RouteAction route = 2 [(validate.rules).message = {required: true}];
+}
+
+message RouteMatch {
+ oneof match_specifier {
+ option (validate.required) = true;
+
+ // The domain from Request URI or Route Header.
+ string domain = 1;
+ }
+}
+
+message RouteAction {
+ oneof cluster_specifier {
+ option (validate.required) = true;
+
+ // Indicates a single upstream cluster to which the request should be routed
+ // to.
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
+ }
+}
diff --git a/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.proto b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.proto
new file mode 100644
index 0000000000000..380ee714f40c2
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/sip_proxy.proto
@@ -0,0 +1,108 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.network.sip_proxy.v3alpha;
+
+import "contrib/envoy/extensions/filters/network/sip_proxy/v3alpha/route.proto";
+
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.network.sip_proxy.v3alpha";
+option java_outer_classname = "SipProxyProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Sip Proxy]
+// [#extension: envoy.filters.network.sip_proxy]
+
+message SipProxy {
+ message SipSettings {
+ // Transaction timeout timer [Timer B]; the unit is milliseconds, default value 64*T1.
+ //
+ // Session Initiation Protocol (SIP) timer summary
+ //
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer | Default value | Section | Meaning |
+ // +=========+=========================+==========+==============================================================================+
+ // | T1 | 500 ms | 17.1.1.1 | Round-trip time (RTT) estimate |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | T2 | 4 sec | 17.1.2.2 | Maximum re-transmission interval for non-INVITE requests and INVITE responses|
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | T4 | 5 sec | 17.1.2.2 | Maximum duration that a message can remain in the network |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer A | initially T1 | 17.1.1.2 | INVITE request re-transmission interval, for UDP only |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer B | 64*T1 | 17.1.1.2 | INVITE transaction timeout timer |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer D | > 32 sec. for UDP | 17.1.1.2 | Wait time for response re-transmissions |
+ // | | 0 sec. for TCP and SCTP | | |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer E | initially T1 | 17.1.2.2 | Non-INVITE request re-transmission interval, UDP only |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer F | 64*T1 | 17.1.2.2 | Non-INVITE transaction timeout timer |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer G | initially T1 | 17.2.1 | INVITE response re-transmission interval |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer H | 64*T1 | 17.2.1 | Wait time for ACK receipt |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer I | T4 for UDP | 17.2.1 | Wait time for ACK re-transmissions |
+ // | | 0 sec. for TCP and SCTP | | |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer J | 64*T1 for UDP | 17.2.2 | Wait time for re-transmissions of non-INVITE requests |
+ // | | 0 sec. for TCP and SCTP | | |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ // | Timer K | T4 for UDP | 17.1.2.2 | Wait time for response re-transmissions |
+ // | | 0 sec. for TCP and SCTP | | |
+ // +---------+-------------------------+----------+------------------------------------------------------------------------------+
+ google.protobuf.Duration transaction_timeout = 1;
+
+ // own domain name
+ string own_domain = 2;
+
+ // points to domain match with own_domain
+ string domain_match_parameter_name = 3;
+ }
+
+ // The human readable prefix to use when emitting statistics.
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
+
+ // The route table for the connection manager is static and is specified in this property.
+ RouteConfiguration route_config = 2;
+
+ // A list of individual Sip filters that make up the filter chain for requests made to the
+ // Sip proxy. Order matters as the filters are processed sequentially. For backwards
+ // compatibility, if no sip_filters are specified, a default Sip router filter
+ // (`envoy.filters.sip.router`) is used.
+ // [#extension-category: envoy.sip_proxy.filters]
+ repeated SipFilter sip_filters = 3;
+
+ SipSettings settings = 4;
+}
+
+// SipFilter configures a Sip filter.
+message SipFilter {
+ // The name of the filter to instantiate. The name must match a supported
+ // filter. The built-in filters are:
+ //
+ string name = 1 [(validate.rules).string = {min_len: 1}];
+
+ // Filter specific configuration which depends on the filter being instantiated. See the supported
+ // filters for further documentation.
+ oneof config_type {
+ google.protobuf.Any typed_config = 3;
+ }
+}
+
+// SipProtocolOptions specifies Sip upstream protocol options. This object is used in
+// :ref:`typed_extension_protocol_options`,
+// keyed by the name `envoy.filters.network.sip_proxy`.
+message SipProtocolOptions {
+ // All sip messages in one dialog should go to the same endpoint.
+ bool session_affinity = 1;
+
+ // The Register with Authorization header should go to the same endpoint which sent out the 401 Unauthorized.
+ bool registration_affinity = 2;
+}
diff --git a/api/envoy/extensions/common/key_value/v3/BUILD b/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD
similarity index 100%
rename from api/envoy/extensions/common/key_value/v3/BUILD
rename to api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD
diff --git a/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto b/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto
new file mode 100644
index 0000000000000..aa2d8cd2fb823
--- /dev/null
+++ b/api/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto
@@ -0,0 +1,44 @@
+syntax = "proto3";
+
+package envoy.extensions.private_key_providers.cryptomb.v3alpha;
+
+import "envoy/config/core/v3/base.proto";
+
+import "google/protobuf/duration.proto";
+
+import "udpa/annotations/sensitive.proto";
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.private_key_providers.cryptomb.v3alpha";
+option java_outer_classname = "CryptombProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: CryptoMb private key provider]
+// [#extension: envoy.tls.key_providers.cryptomb]
+
+// A CryptoMbPrivateKeyMethodConfig message specifies how the CryptoMb private
+// key provider is configured. The private key provider provides `SIMD`
+// processing for RSA sign and decrypt operations (ECDSA signing uses regular
+// BoringSSL functions). The provider works by gathering the operations into a
+// worker-thread specific queue, and processing the queue using `ipp-crypto`
+// library when the queue is full or when a timer expires.
+// [#extension-category: envoy.tls.key_providers]
+message CryptoMbPrivateKeyMethodConfig {
+ // Private key to use in the private key provider. If set to inline_bytes or
+ // inline_string, the value needs to be the private key in PEM format.
+ config.core.v3.DataSource private_key = 1 [(udpa.annotations.sensitive) = true];
+
+ // How long to wait until the per-thread processing queue should be
+ // processed. If the processing queue gets full (eight sign or decrypt
+ // requests are received) it is processed immediately. However, if the
+ // queue is not filled before the delay has expired, the requests
+ // already in the queue are processed, even if the queue is not full.
+ // In effect, this value controls the balance between latency and
+ // throughput. The duration needs to be set to a non-zero value.
+ google.protobuf.Duration poll_delay = 2 [(validate.rules).duration = {
+ required: true
+ gt {}
+ }];
+}
diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto
index d6213d6fe9488..bcedfa509818e 100644
--- a/api/envoy/config/cluster/v3/cluster.proto
+++ b/api/envoy/config/cluster/v3/cluster.proto
@@ -43,7 +43,7 @@ message ClusterCollection {
}
// Configuration for a single upstream cluster.
-// [#next-free-field: 56]
+// [#next-free-field: 57]
message Cluster {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster";
@@ -123,15 +123,23 @@ message Cluster {
// only perform a lookup for addresses in the IPv6 family. If AUTO is
// specified, the DNS resolver will first perform a lookup for addresses in
// the IPv6 family and fallback to a lookup for addresses in the IPv4 family.
+ // This is semantically equivalent to a non-existent V6_PREFERRED option.
+ // AUTO is a legacy name that is more opaque than
+ // necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API.
+ // If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the
+ // IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback
+ // target will only get v6 addresses if there were NO v4 addresses to return.
// For cluster types other than
// :ref:`STRICT_DNS` and
// :ref:`LOGICAL_DNS`,
// this setting is
// ignored.
+ // [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.]
enum DnsLookupFamily {
AUTO = 0;
V4_ONLY = 1;
V6_ONLY = 2;
+ V4_PREFERRED = 3;
}
enum ClusterProtocolSelection {
@@ -337,6 +345,35 @@ message Cluster {
bool list_as_any = 7;
}
+ // Configuration for :ref:`slow start mode `.
+ message SlowStartConfig {
+ // Represents the size of slow start window.
+ // If set, the newly created host remains in slow start mode starting from its creation time
+ // for the duration of slow start window.
+ google.protobuf.Duration slow_start_window = 1;
+
+ // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0,
+ // so that the endpoint would get a linearly increasing amount of traffic.
+ // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly.
+ // The value of aggression parameter should be greater than 0.0.
+ // By tuning the parameter, it is possible to achieve a polynomial or exponential shape of the ramp-up curve.
+ //
+ // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression:
+ // `new_weight = weight * time_factor ^ (1 / aggression)`,
+ // where `time_factor=(time_since_start_seconds / slow_start_time_seconds)`.
+ //
+ // As time progresses, more and more traffic would be sent to the endpoint, which is in the slow start window.
+ // Once host exits slow start, time_factor and aggression no longer affect its weight.
+ core.v3.RuntimeDouble aggression = 2;
+ }
+
+ // Specific configuration for the RoundRobin load balancing policy.
+ message RoundRobinLbConfig {
+ // Configuration for slow start mode.
+ // If this configuration is not set, slow start will not be enabled.
+ SlowStartConfig slow_start_config = 1;
+ }
+
// Specific configuration for the LeastRequest load balancing policy.
message LeastRequestLbConfig {
option (udpa.annotations.versioning).previous_message_type =
@@ -370,6 +407,10 @@ message Cluster {
// .. note::
// This setting only takes effect if all host weights are not equal.
core.v3.RuntimeDouble active_request_bias = 2;
+
+ // Configuration for slow start mode.
+ // If this configuration is not set, slow start will not be enabled.
+ SlowStartConfig slow_start_config = 3;
}
// Specific configuration for the :ref:`RingHash`
@@ -951,6 +992,9 @@ message Cluster {
// Optional configuration for the LeastRequest load balancing policy.
LeastRequestLbConfig least_request_lb_config = 37;
+
+ // Optional configuration for the RoundRobin load balancing policy.
+ RoundRobinLbConfig round_robin_lb_config = 56;
}
// Common configuration for all load balancer implementations.
diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD b/api/envoy/config/common/key_value/v3/BUILD
similarity index 84%
rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD
rename to api/envoy/config/common/key_value/v3/BUILD
index 1c1a6f6b44235..e9b556d681cfd 100644
--- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD
+++ b/api/envoy/config/common/key_value/v3/BUILD
@@ -8,5 +8,6 @@ api_proto_package(
deps = [
"//envoy/config/core/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
],
)
diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/config/common/key_value/v3/config.proto
similarity index 60%
rename from generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto
rename to api/envoy/config/common/key_value/v3/config.proto
index 66a55435437b3..8d62c09863083 100644
--- a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto
+++ b/api/envoy/config/common/key_value/v3/config.proto
@@ -1,22 +1,25 @@
syntax = "proto3";
-package envoy.extensions.common.key_value.v3;
+package envoy.config.common.key_value.v3;
import "envoy/config/core/v3/extension.proto";
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3";
+option java_package = "io.envoyproxy.envoy.config.common.key_value.v3";
option java_outer_classname = "ConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Key Value Store storage plugin]
-// [#alpha:]
// This shared configuration for Envoy key value stores.
message KeyValueStoreConfig {
+ option (xds.annotations.v3.message_status).work_in_progress = true;
+
// [#extension-category: envoy.common.key_value]
- config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}];
+ core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}];
}
diff --git a/api/envoy/config/common/matcher/v3/BUILD b/api/envoy/config/common/matcher/v3/BUILD
index 2f90ace882d93..221350b756d44 100644
--- a/api/envoy/config/common/matcher/v3/BUILD
+++ b/api/envoy/config/common/matcher/v3/BUILD
@@ -10,5 +10,6 @@ api_proto_package(
"//envoy/config/route/v3:pkg",
"//envoy/type/matcher/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
],
)
diff --git a/api/envoy/config/common/matcher/v3/matcher.proto b/api/envoy/config/common/matcher/v3/matcher.proto
index d7deb71d0b469..1fb8c83ec3ef7 100644
--- a/api/envoy/config/common/matcher/v3/matcher.proto
+++ b/api/envoy/config/common/matcher/v3/matcher.proto
@@ -6,6 +6,8 @@ import "envoy/config/core/v3/extension.proto";
import "envoy/config/route/v3/route_components.proto";
import "envoy/type/matcher/v3/string.proto";
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
import "validate/validate.proto";
@@ -21,9 +23,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// is found the action specified by the most specific on_no_match will be evaluated.
// As an on_no_match might result in another matching tree being evaluated, this process
// might repeat several times until the final OnMatch (or no match) is decided.
-//
-// [#alpha:]
message Matcher {
+ option (xds.annotations.v3.message_status).work_in_progress = true;
+
// What to do if a match is successful.
message OnMatch {
oneof on_match {
diff --git a/api/envoy/config/core/v3/BUILD b/api/envoy/config/core/v3/BUILD
index 72e10b6df8440..3fbb6b0e1f186 100644
--- a/api/envoy/config/core/v3/BUILD
+++ b/api/envoy/config/core/v3/BUILD
@@ -11,6 +11,7 @@ api_proto_package(
"//envoy/type/matcher/v3:pkg",
"//envoy/type/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
"@com_github_cncf_udpa//xds/core/v3:pkg",
],
)
diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto
index d6c507b8dec9a..efa8ec5186f46 100644
--- a/api/envoy/config/core/v3/base.proto
+++ b/api/envoy/config/core/v3/base.proto
@@ -296,6 +296,15 @@ message RuntimeFeatureFlag {
string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
+// Query parameter name/value pair.
+message QueryParameter {
+ // The key of the query parameter. Case sensitive.
+ string key = 1 [(validate.rules).string = {min_len: 1}];
+
+ // The value of the query parameter.
+ string value = 2;
+}
+
// Header name/value pair.
message HeaderValue {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue";
@@ -320,12 +329,33 @@ message HeaderValueOption {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.core.HeaderValueOption";
+ // Describes the supported actions types for header append action.
+ enum HeaderAppendAction {
+ // This action will append the specified value to the existing values if the header
+ // already exists. If the header doesn't exist then this will add the header with
+ // specified key and value.
+ APPEND_IF_EXISTS_OR_ADD = 0;
+
+ // This action will add the header if it doesn't already exist. If the header
+ // already exists then this will be a no-op.
+ ADD_IF_ABSENT = 1;
+
+ // This action will overwrite the specified value by discarding any existing values if
+ // the header already exists. If the header doesn't exist then this will add the header
+ // with specified key and value.
+ OVERWRITE_IF_EXISTS_OR_ADD = 2;
+ }
+
// Header name/value pair that this option applies to.
HeaderValue header = 1 [(validate.rules).message = {required: true}];
// Should the value be appended? If true (default), the value is appended to
// existing values. Otherwise it replaces any existing values.
google.protobuf.BoolValue append = 2;
+
+ // [#not-implemented-hide:] Describes the action taken to append/overwrite the given value for an existing header
+ // or to only add this header if it's absent. Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD`.
+ HeaderAppendAction append_action = 3 [(validate.rules).enum = {defined_only: true}];
}
// Wrapper for a set of headers.
diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto
index 304297e7c011c..81a9b6c7f1535 100644
--- a/api/envoy/config/core/v3/health_check.proto
+++ b/api/envoy/config/core/v3/health_check.proto
@@ -73,7 +73,7 @@ message HealthCheck {
}
}
- // [#next-free-field: 12]
+ // [#next-free-field: 13]
message HttpHealthCheck {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.core.HealthCheck.HttpHealthCheck";
@@ -118,6 +118,18 @@ message HealthCheck {
// range are required. Only statuses in the range [100, 600) are allowed.
repeated type.v3.Int64Range expected_statuses = 9;
+ // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range
+ // will count towards the configured :ref:`unhealthy_threshold `,
+ // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of
+ // :ref:`Int64Range `. The start and end of each range are required.
+ // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses `
+ // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will
+ // be considered a successful health check. By default all responses not in
+ // :ref:`expected_statuses ` will result in
+ // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any
+ // non-200 response will result in the host being marked unhealthy.
+ repeated type.v3.Int64Range retriable_statuses = 12;
+
// Use specified application protocol for health checks.
type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];
@@ -243,8 +255,10 @@ message HealthCheck {
uint32 interval_jitter_percent = 18;
// The number of unhealthy health checks required before a host is marked
- // unhealthy. Note that for *http* health checking if a host responds with 503
- // this threshold is ignored and the host is considered unhealthy immediately.
+ // unhealthy. Note that for *http* health checking if a host responds with a code not in
+ // :ref:`expected_statuses `
+ // or :ref:`retriable_statuses `,
+ // this threshold is ignored and the host is considered immediately unhealthy.
google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];
// The number of healthy health checks required before a host is marked
diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto
index 8f2347eb55179..4535b16667388 100644
--- a/api/envoy/config/core/v3/protocol.proto
+++ b/api/envoy/config/core/v3/protocol.proto
@@ -8,6 +8,8 @@ import "envoy/type/v3/percent.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
+import "xds/annotations/v3/status.proto";
+
import "envoy/annotations/deprecation.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
@@ -60,15 +62,26 @@ message UpstreamHttpProtocolOptions {
"envoy.api.v2.core.UpstreamHttpProtocolOptions";
// Set transport socket `SNI `_ for new
- // upstream connections based on the downstream HTTP host/authority header, as seen by the
- // :ref:`router filter `.
+ // upstream connections based on the downstream HTTP host/authority header or any other arbitrary
+ // header when :ref:`override_auto_sni_header `
+ // is set, as seen by the :ref:`router filter `.
bool auto_sni = 1;
// Automatic validate upstream presented certificate for new upstream connections based on the
- // downstream HTTP host/authority header, as seen by the
- // :ref:`router filter `.
- // This field is intended to set with `auto_sni` field.
+ // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header `
+ // is set, as seen by the :ref:`router filter `.
+ // This field is intended to be set with `auto_sni` field.
bool auto_san_validation = 2;
+
+ // An optional alternative to the host/authority header to be used for setting the SNI value.
+ // It should be a valid downstream HTTP header, as seen by the
+ // :ref:`router filter `.
+ // If unset, host/authority header will be used for populating the SNI. If the specified header
+ // is not found or the value is empty, host/authority header will be used instead.
+ // This field is intended to be set with `auto_sni` and/or `auto_san_validation` fields.
+ // If none of these fields are set then setting this would be a no-op.
+ string override_auto_sni_header = 3
+ [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}];
}
// Configures the alternate protocols cache which tracks alternate protocols that can be used to
@@ -91,6 +104,12 @@ message AlternateProtocolsCacheOptions {
// it is possible for the maximum entries in the cache to go slightly above the configured
// value depending on timing. This is similar to how other circuit breakers work.
google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}];
+
+ // Allows configuring a persistent
+ // :ref:`key value store ` to flush
+ // alternate protocols entries to disk.
+ // This function is currently only supported if concurrency is 1.
+ TypedExtensionConfig key_value_store_config = 3;
}
// [#next-free-field: 7]
@@ -138,10 +157,11 @@ message HttpProtocolOptions {
// The maximum duration of a connection. The duration is defined as a period since a connection
// was established. If not set, there is no max duration. When max_connection_duration is reached
- // the connection will be closed. Drain sequence will occur prior to closing the connection if
- // if's applicable. See :ref:`drain_timeout
+ // and if there are no active streams, the connection will be closed. If there are any active streams,
+  // the drain sequence will kick in, and the connection will be force-closed after the drain period.
+ // See :ref:`drain_timeout
// `.
- // Note: not implemented for upstream connections.
+  // Note: This feature is not yet implemented for upstream connections.
google.protobuf.Duration max_connection_duration = 3;
// The maximum number of headers. If unconfigured, the default
@@ -473,6 +493,7 @@ message GrpcProtocolOptions {
}
// A message which allows using HTTP/3.
+// [#next-free-field: 6]
message Http3ProtocolOptions {
QuicProtocolOptions quic_protocol_options = 1;
@@ -483,6 +504,14 @@ message Http3ProtocolOptions {
// If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging
// `.
google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2;
+
+ // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using
+ // the header mechanisms from the `HTTP/2 extended connect RFC
+ // `_
+ // and settings `proposed for HTTP/3
+ // `_
+ // Note that HTTP/3 CONNECT is not yet an RFC.
+ bool allow_extended_connect = 5 [(xds.annotations.v3.field_status).work_in_progress = true];
}
// A message to control transformations to the :scheme header
diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
index 07044f92201e9..847e36f163ba2 100644
--- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
+++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
@@ -390,7 +390,8 @@ message FilterStateRule {
// A map of string keys to requirements. The string key is the string value
// in the FilterState with the name specified in the *name* field above.
- map requires = 3;
+ map
+ requires = 3;
}
// This is the Envoy HTTP filter config for JWT authentication.
diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto
index d66f9be2b4981..474f30a285633 100644
--- a/api/envoy/config/rbac/v3/rbac.proto
+++ b/api/envoy/config/rbac/v3/rbac.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
package envoy.config.rbac.v3;
import "envoy/config/core/v3/address.proto";
+import "envoy/config/core/v3/extension.proto";
import "envoy/config/route/v3/route_components.proto";
import "envoy/type/matcher/v3/metadata.proto";
import "envoy/type/matcher/v3/path.proto";
@@ -146,7 +147,7 @@ message Policy {
}
// Permission defines an action (or actions) that a principal can take.
-// [#next-free-field: 12]
+// [#next-free-field: 13]
message Permission {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission";
@@ -218,6 +219,10 @@ message Permission {
// Please refer to :ref:`this FAQ entry ` to learn to
// setup SNI.
type.matcher.v3.StringMatcher requested_server_name = 9;
+
+ // Extension for configuring custom matchers for RBAC.
+ // [#extension-category: envoy.rbac.matchers]
+ core.v3.TypedExtensionConfig matcher = 12;
}
}
diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto
index e6be0c43ed0ac..d25edd756db5f 100644
--- a/api/envoy/config/route/v3/route_components.proto
+++ b/api/envoy/config/route/v3/route_components.proto
@@ -529,6 +529,14 @@ message RouteMatch {
// against all the specified query parameters. If the number of specified
// query parameters is nonzero, they all must match the *path* header's
// query string for a match to occur.
+ //
+ // .. note::
+ //
+ // If query parameters are used to pass request message fields when
+ // `grpc_json_transcoder `_
  //   is used, the transcoded message fields may be different. The query parameters are
+ // url encoded, but the message fields are not. For example, if a query
+ // parameter is "foo%20bar", the message field will be "foo bar".
repeated QueryParameterMatcher query_parameters = 7;
// If specified, only gRPC requests will be matched. The router will check
@@ -1164,7 +1172,7 @@ message RouteAction {
}
// HTTP retry :ref:`architecture overview `.
-// [#next-free-field: 12]
+// [#next-free-field: 14]
message RetryPolicy {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy";
@@ -1305,8 +1313,8 @@ message RetryPolicy {
google.protobuf.UInt32Value num_retries = 2
[(udpa.annotations.field_migrate).rename = "max_retries"];
- // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The
- // same conditions documented for
+ // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This
+ // parameter is optional. The same conditions documented for
// :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.
//
// .. note::
@@ -1318,6 +1326,27 @@ message RetryPolicy {
// would have been exhausted.
google.protobuf.Duration per_try_timeout = 3;
+ // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This
+ // parameter is optional and if absent there is no per try idle timeout. The semantics of the per
+ // try idle timeout are similar to the
+ // :ref:`route idle timeout ` and
+ // :ref:`stream idle timeout
+ // `
+ // both enforced by the HTTP connection manager. The difference is that this idle timeout
+ // is enforced by the router for each individual attempt and thus after all previous filters have
+ // run, as opposed to *before* all previous filters run for the other idle timeouts. This timeout
+ // is useful in cases in which total request timeout is bounded by a number of retries and a
+ // :ref:`per_try_timeout `, but
+ // there is a desire to ensure each try is making incremental progress. Note also that similar
+ // to :ref:`per_try_timeout `,
+ // this idle timeout does not start until after both the entire request has been received by the
+ // router *and* a connection pool connection has been obtained. Unlike
+ // :ref:`per_try_timeout `,
+ // the idle timer continues once the response starts streaming back to the downstream client.
+ // This ensures that response data continues to make progress without using one of the HTTP
+ // connection manager idle timeouts.
+ google.protobuf.Duration per_try_idle_timeout = 13;
+
// Specifies an implementation of a RetryPriority which is used to determine the
// distribution of load across priorities used for retries. Refer to
// :ref:`retry plugin configuration ` for more details.
@@ -1329,6 +1358,11 @@ message RetryPolicy {
// details.
repeated RetryHostPredicate retry_host_predicate = 5;
+ // Retry options predicates that will be applied prior to retrying a request. These predicates
+ // allow customizing request behavior between retries.
+ // [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions]
+ repeated core.v3.TypedExtensionConfig retry_options_predicates = 12;
+
// The maximum number of times host selection will be reattempted before giving up, at which
// point the host that was last selected will be routed to. If unspecified, this will default to
// retrying once.
diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD b/api/envoy/extensions/access_loggers/open_telemetry/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD
rename to api/envoy/extensions/access_loggers/open_telemetry/v3/BUILD
diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto b/api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto
similarity index 93%
rename from api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto
rename to api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto
index 1b7027133e153..cd4a63181290f 100644
--- a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto
+++ b/api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package envoy.extensions.access_loggers.open_telemetry.v3alpha;
+package envoy.extensions.access_loggers.open_telemetry.v3;
import "envoy/extensions/access_loggers/grpc/v3/als.proto";
@@ -9,10 +9,9 @@ import "opentelemetry/proto/common/v1/common.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3";
option java_outer_classname = "LogsServiceProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: OpenTelemetry (gRPC) Access Log]
diff --git a/api/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD b/api/envoy/extensions/cache/simple_http_cache/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD
rename to api/envoy/extensions/cache/simple_http_cache/v3/BUILD
diff --git a/api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto b/api/envoy/extensions/cache/simple_http_cache/v3/config.proto
similarity index 83%
rename from api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto
rename to api/envoy/extensions/cache/simple_http_cache/v3/config.proto
index 1b42e9b3f93d4..e7bd7cdbdf91a 100644
--- a/api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto
+++ b/api/envoy/extensions/cache/simple_http_cache/v3/config.proto
@@ -1,10 +1,10 @@
syntax = "proto3";
-package envoy.extensions.cache.simple_http_cache.v3alpha;
+package envoy.extensions.cache.simple_http_cache.v3;
import "udpa/annotations/status.proto";
-option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3";
option java_outer_classname = "ConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD
index 6e07b4a9226bb..b9cc22c7ee67c 100644
--- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD
+++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD
@@ -8,8 +8,8 @@ api_proto_package(
deps = [
"//envoy/annotations:pkg",
"//envoy/config/cluster/v3:pkg",
+ "//envoy/config/common/key_value/v3:pkg",
"//envoy/config/core/v3:pkg",
- "//envoy/extensions/common/key_value/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
)
diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto
index 4a0d87ff6c3b8..e3904ae287192 100644
--- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto
+++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto
@@ -3,10 +3,10 @@ syntax = "proto3";
package envoy.extensions.common.dynamic_forward_proxy.v3;
import "envoy/config/cluster/v3/cluster.proto";
+import "envoy/config/common/key_value/v3/config.proto";
import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/extension.proto";
import "envoy/config/core/v3/resolver.proto";
-import "envoy/extensions/common/key_value/v3/config.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
@@ -142,5 +142,5 @@ message DnsCacheConfig {
// [#not-implemented-hide:]
// Configuration to flush the DNS cache to long term storage.
- key_value.v3.KeyValueStoreConfig key_value_config = 13;
+ config.common.key_value.v3.KeyValueStoreConfig key_value_config = 13;
}
diff --git a/api/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/extensions/common/key_value/v3/config.proto
deleted file mode 100644
index 66a55435437b3..0000000000000
--- a/api/envoy/extensions/common/key_value/v3/config.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.common.key_value.v3;
-
-import "envoy/config/core/v3/extension.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3";
-option java_outer_classname = "ConfigProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: Key Value Store storage plugin]
-
-// [#alpha:]
-// This shared configuration for Envoy key value stores.
-message KeyValueStoreConfig {
- // [#extension-category: envoy.common.key_value]
- config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}];
-}
diff --git a/api/envoy/extensions/common/matching/v3/BUILD b/api/envoy/extensions/common/matching/v3/BUILD
index 1afd4545d9608..de9e120297ac4 100644
--- a/api/envoy/extensions/common/matching/v3/BUILD
+++ b/api/envoy/extensions/common/matching/v3/BUILD
@@ -10,6 +10,7 @@ api_proto_package(
"//envoy/config/common/matcher/v3:pkg",
"//envoy/config/core/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
"@com_github_cncf_udpa//xds/type/matcher/v3:pkg",
],
)
diff --git a/api/envoy/extensions/common/matching/v3/extension_matcher.proto b/api/envoy/extensions/common/matching/v3/extension_matcher.proto
index eee82a381633b..10bd3b7389a69 100644
--- a/api/envoy/extensions/common/matching/v3/extension_matcher.proto
+++ b/api/envoy/extensions/common/matching/v3/extension_matcher.proto
@@ -5,6 +5,7 @@ package envoy.extensions.common.matching.v3;
import "envoy/config/common/matcher/v3/matcher.proto";
import "envoy/config/core/v3/extension.proto";
+import "xds/annotations/v3/status.proto";
import "xds/type/matcher/v3/matcher.proto";
import "envoy/annotations/deprecation.proto";
@@ -21,9 +22,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Wrapper around an existing extension that provides an associated matcher. This allows
// decorating an existing extension with a matcher, which can be used to match against
// relevant protocol data.
-//
-// [#alpha:]
message ExtensionWithMatcher {
+ option (xds.annotations.v3.message_status).work_in_progress = true;
+
// The associated matcher. This is deprecated in favor of xds_matcher.
config.common.matcher.v3.Matcher matcher = 1
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/api/envoy/extensions/filters/http/admission_control/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD
rename to api/envoy/extensions/filters/http/admission_control/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto
similarity index 96%
rename from generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
rename to api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto
index 9bb3603f9ebd6..702f03019b1c4 100644
--- a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
+++ b/api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package envoy.extensions.filters.http.admission_control.v3alpha;
+package envoy.extensions.filters.http.admission_control.v3;
import "envoy/config/core/v3/base.proto";
import "envoy/type/v3/range.proto";
@@ -10,10 +10,9 @@ import "google/protobuf/duration.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3";
option java_outer_classname = "AdmissionControlProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Admission Control]
diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
deleted file mode 100644
index 9bb3603f9ebd6..0000000000000
--- a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
+++ /dev/null
@@ -1,103 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.http.admission_control.v3alpha;
-
-import "envoy/config/core/v3/base.proto";
-import "envoy/type/v3/range.proto";
-
-import "google/protobuf/duration.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha";
-option java_outer_classname = "AdmissionControlProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: Admission Control]
-// [#extension: envoy.filters.http.admission_control]
-
-// [#next-free-field: 8]
-message AdmissionControl {
- // Default method of specifying what constitutes a successful request. All status codes that
- // indicate a successful request must be explicitly specified if not relying on the default
- // values.
- message SuccessCriteria {
- message HttpCriteria {
- // Status code ranges that constitute a successful request. Configurable codes are in the
- // range [100, 600).
- repeated type.v3.Int32Range http_success_status = 1
- [(validate.rules).repeated = {min_items: 1}];
- }
-
- message GrpcCriteria {
- // Status codes that constitute a successful request.
- // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
- repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}];
- }
-
- // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful
- // responses.
- //
- // .. note::
- //
- // The default HTTP codes considered successful by the admission controller are done so due
- // to the unlikelihood that sending fewer requests would change their behavior (for example:
- // redirects, unauthorized access, or bad requests won't be alleviated by sending less
- // traffic).
- HttpCriteria http_criteria = 1;
-
- // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok,
- // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated,
- // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented.
- //
- // .. note::
- //
- // The default gRPC codes that are considered successful by the admission controller are
- // chosen because of the unlikelihood that sending fewer requests will change the behavior.
- GrpcCriteria grpc_criteria = 2;
- }
-
- // If set to false, the admission control filter will operate as a pass-through filter. If the
- // message is unspecified, the filter will be enabled.
- config.core.v3.RuntimeFeatureFlag enabled = 1;
-
- // Defines how a request is considered a success/failure.
- oneof evaluation_criteria {
- option (validate.required) = true;
-
- SuccessCriteria success_criteria = 2;
- }
-
- // The sliding time window over which the success rate is calculated. The window is rounded to the
- // nearest second. Defaults to 30s.
- google.protobuf.Duration sampling_window = 3;
-
- // Rejection probability is defined by the formula::
- //
- // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression)
- //
- // The aggression dictates how heavily the admission controller will throttle requests upon SR
- // dropping at or below the threshold. A value of 1 will result in a linear increase in
- // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the
- // message is unspecified, the aggression is 1.0. See `the admission control documentation
- // `_
- // for a diagram illustrating this.
- config.core.v3.RuntimeDouble aggression = 4;
-
- // Dictates the success rate at which the rejection probability is non-zero. As success rate drops
- // below this threshold, rejection probability will increase. Any success rate above the threshold
- // results in a rejection probability of 0. Defaults to 95%.
- config.core.v3.RuntimePercent sr_threshold = 5;
-
- // If the average RPS of the sampling window is below this threshold, the request
- // will not be rejected, even if the success rate is lower than sr_threshold.
- // Defaults to 0.
- config.core.v3.RuntimeUInt32 rps_threshold = 6;
-
- // The probability of rejection will never exceed this value, even if the failure rate is rising.
- // Defaults to 80%.
- config.core.v3.RuntimePercent max_rejection_probability = 7;
-}
diff --git a/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto b/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto
index e628a6ca73fbb..0f0609b6e55ed 100644
--- a/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto
+++ b/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto
@@ -15,10 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Configuration for the alternate protocols cache HTTP filter.
// [#extension: envoy.filters.http.alternate_protocols_cache]
-// TODO(RyanTheOptimist): Move content from source/docs/http3_upstream.md to
-// docs/root/intro/arch_overview/upstream/connection_pooling.rst when unhiding the proto.
message FilterConfig {
- // [#not-implemented-hide:]
// If set, causes the use of the alternate protocols cache, which is responsible for
// parsing and caching HTTP Alt-Svc headers. This enables the use of HTTP/3 for upstream
// servers that advertise supporting it.
diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD b/api/envoy/extensions/filters/http/bandwidth_limit/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD
rename to api/envoy/extensions/filters/http/bandwidth_limit/v3/BUILD
diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto
similarity index 93%
rename from api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto
rename to api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto
index 4cd5f8268b704..c512d541aaefc 100644
--- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto
+++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package envoy.extensions.filters.http.bandwidth_limit.v3alpha;
+package envoy.extensions.filters.http.bandwidth_limit.v3;
import "envoy/config/core/v3/base.proto";
@@ -10,10 +10,9 @@ import "google/protobuf/wrappers.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3";
option java_outer_classname = "BandwidthLimitProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Bandwidth limit]
diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/BUILD b/api/envoy/extensions/filters/http/cache/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/http/cache/v3alpha/BUILD
rename to api/envoy/extensions/filters/http/cache/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3/cache.proto
similarity index 96%
rename from generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto
rename to api/envoy/extensions/filters/http/cache/v3/cache.proto
index 5f0a5befa4bb3..71f4a5bb73f93 100644
--- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto
+++ b/api/envoy/extensions/filters/http/cache/v3/cache.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package envoy.extensions.filters.http.cache.v3alpha;
+package envoy.extensions.filters.http.cache.v3;
import "envoy/config/route/v3/route_components.proto";
import "envoy/type/matcher/v3/string.proto";
@@ -11,10 +11,9 @@ import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3";
option java_outer_classname = "CacheProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: HTTP Cache Filter]
diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto
deleted file mode 100644
index 5f0a5befa4bb3..0000000000000
--- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto
+++ /dev/null
@@ -1,82 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.http.cache.v3alpha;
-
-import "envoy/config/route/v3/route_components.proto";
-import "envoy/type/matcher/v3/string.proto";
-
-import "google/protobuf/any.proto";
-
-import "udpa/annotations/status.proto";
-import "udpa/annotations/versioning.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha";
-option java_outer_classname = "CacheProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: HTTP Cache Filter]
-
-// [#extension: envoy.filters.http.cache]
-message CacheConfig {
- option (udpa.annotations.versioning).previous_message_type =
- "envoy.config.filter.http.cache.v2alpha.CacheConfig";
-
- // [#not-implemented-hide:]
- // Modifies cache key creation by restricting which parts of the URL are included.
- message KeyCreatorParams {
- option (udpa.annotations.versioning).previous_message_type =
- "envoy.config.filter.http.cache.v2alpha.CacheConfig.KeyCreatorParams";
-
- // If true, exclude the URL scheme from the cache key. Set to true if your origins always
- // produce the same response for http and https requests.
- bool exclude_scheme = 1;
-
- // If true, exclude the host from the cache key. Set to true if your origins' responses don't
- // ever depend on host.
- bool exclude_host = 2;
-
- // If *query_parameters_included* is nonempty, only query parameters matched
- // by one or more of its matchers are included in the cache key. Any other
- // query params will not affect cache lookup.
- repeated config.route.v3.QueryParameterMatcher query_parameters_included = 3;
-
- // If *query_parameters_excluded* is nonempty, query parameters matched by one
- // or more of its matchers are excluded from the cache key (even if also
- // matched by *query_parameters_included*), and will not affect cache lookup.
- repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4;
- }
-
- // Config specific to the cache storage implementation.
- // [#extension-category: envoy.filters.http.cache]
- google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];
-
- // List of matching rules that defines allowed *Vary* headers.
- //
- // The *vary* response header holds a list of header names that affect the
- // contents of a response, as described by
- // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.
- //
- // During insertion, *allowed_vary_headers* acts as a allowlist: if a
- // response's *vary* header mentions any header names that aren't matched by any rules in
- // *allowed_vary_headers*, that response will not be cached.
- //
- // During lookup, *allowed_vary_headers* controls what request headers will be
- // sent to the cache storage implementation.
- repeated type.matcher.v3.StringMatcher allowed_vary_headers = 2;
-
- // [#not-implemented-hide:]
- //
- //
- // Modifies cache key creation by restricting which parts of the URL are included.
- KeyCreatorParams key_creator_params = 3;
-
- // [#not-implemented-hide:]
- //
- //
- // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache
- // storage implementation may have its own limit beyond which it will reject insertions).
- uint32 max_body_bytes = 4;
-}
diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD b/api/envoy/extensions/filters/http/cdn_loop/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD
rename to api/envoy/extensions/filters/http/cdn_loop/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto
similarity index 89%
rename from generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
rename to api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto
index 5f201026c66b3..77a19511c3d45 100644
--- a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
+++ b/api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto
@@ -1,14 +1,13 @@
syntax = "proto3";
-package envoy.extensions.filters.http.cdn_loop.v3alpha;
+package envoy.extensions.filters.http.cdn_loop.v3;
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3";
option java_outer_classname = "CdnLoopProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: HTTP CDN-Loop Filter]
diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
deleted file mode 100644
index 5f201026c66b3..0000000000000
--- a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.http.cdn_loop.v3alpha;
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha";
-option java_outer_classname = "CdnLoopProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: HTTP CDN-Loop Filter]
-// [#extension: envoy.filters.http.cdn_loop]
-
-// CDN-Loop Header filter config. See the :ref:`configuration overview
-// ` for more information.
-message CdnLoopConfig {
- // The CDN identifier to use for loop checks and to append to the
- // CDN-Loop header.
- //
- // RFC 8586 calls this the cdn-id. The cdn-id can either be a
- // pseudonym or hostname the CDN is in control of.
- //
- // cdn_id must not be empty.
- string cdn_id = 1 [(validate.rules).string = {min_len: 1}];
-
- // The maximum allowed count of cdn_id in the downstream CDN-Loop
- // request header.
- //
- // The default of 0 means a request can transit the CdnLoopFilter
- // once. A value of 1 means that a request can transit the
- // CdnLoopFilter twice and so on.
- uint32 max_allowed_occurrences = 2;
-}
diff --git a/api/envoy/extensions/filters/http/composite/v3/BUILD b/api/envoy/extensions/filters/http/composite/v3/BUILD
index 1c1a6f6b44235..e9b556d681cfd 100644
--- a/api/envoy/extensions/filters/http/composite/v3/BUILD
+++ b/api/envoy/extensions/filters/http/composite/v3/BUILD
@@ -8,5 +8,6 @@ api_proto_package(
deps = [
"//envoy/config/core/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
],
)
diff --git a/api/envoy/extensions/filters/http/composite/v3/composite.proto b/api/envoy/extensions/filters/http/composite/v3/composite.proto
index f8a3bd83af567..a53364e8adfaf 100644
--- a/api/envoy/extensions/filters/http/composite/v3/composite.proto
+++ b/api/envoy/extensions/filters/http/composite/v3/composite.proto
@@ -4,6 +4,8 @@ package envoy.extensions.filters.http.composite.v3;
import "envoy/config/core/v3/extension.proto";
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
option java_package = "io.envoyproxy.envoy.extensions.filters.http.composite.v3";
@@ -25,9 +27,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// where a match tree is specified that indicates (via
// :ref:`ExecuteFilterAction `)
// which filter configuration to create and delegate to.
-//
-// [#alpha:]
message Composite {
+ option (xds.annotations.v3.message_status).work_in_progress = true;
}
// Composite match action (see :ref:`matching docs ` for more info on match actions).
diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto
index a5d7223b98d28..ecf2d271f952c 100644
--- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto
+++ b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto
@@ -27,6 +27,12 @@ message FilterConfig {
// `.
common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1
[(validate.rules).message = {required: true}];
+
+ // When this flag is set, the filter will add the resolved upstream address in the filter
+ // state. The state should be saved with key
+ // `envoy.stream.upstream_address` (See
+ // :repo:`upstream_address.h`).
+ bool save_upstream_address = 2;
}
// Per route Configuration for the dynamic forward proxy HTTP filter.
diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto
index 62feb51b191d5..b05420fa93cf4 100644
--- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto
+++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto
@@ -244,6 +244,7 @@ message AuthorizationRequest {
repeated config.core.v3.HeaderValue headers_to_add = 2;
}
+// [#next-free-field: 6]
message AuthorizationResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.http.ext_authz.v2.AuthorizationResponse";
@@ -270,6 +271,15 @@ message AuthorizationResponse {
// the authorization response itself is successful, i.e. not failed or denied. When this list is
// *not* set, no additional headers will be added to the client's response on success.
type.matcher.v3.ListStringMatcher allowed_client_headers_on_success = 4;
+
+ // When this :ref:`list ` is set, authorization
+ // response headers that have a correspondent match will be emitted as dynamic metadata to be consumed
+ // by the next filter. This metadata lives in a namespace specified by the canonical name of extension filter
+ // that requires it:
+ //
+ // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter.
+ // - :ref:`envoy.filters.network.ext_authz ` for network filter.
+ type.matcher.v3.ListStringMatcher dynamic_metadata_from_headers = 5;
}
// Extra settings on a per virtualhost/route/weighted-cluster level.
diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/BUILD b/api/envoy/extensions/filters/http/ext_proc/v3/BUILD
similarity index 84%
rename from generated_api_shadow/envoy/config/ratelimit/v3/BUILD
rename to api/envoy/extensions/filters/http/ext_proc/v3/BUILD
index 1c1a6f6b44235..e9b556d681cfd 100644
--- a/generated_api_shadow/envoy/config/ratelimit/v3/BUILD
+++ b/api/envoy/extensions/filters/http/ext_proc/v3/BUILD
@@ -8,5 +8,6 @@ api_proto_package(
deps = [
"//envoy/config/core/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
],
)
diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto
similarity index 96%
rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto
rename to api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto
index 37560feba3c27..e688657830a07 100644
--- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto
+++ b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto
@@ -1,20 +1,22 @@
syntax = "proto3";
-package envoy.extensions.filters.http.ext_proc.v3alpha;
+package envoy.extensions.filters.http.ext_proc.v3;
import "envoy/config/core/v3/grpc_service.proto";
-import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto";
+import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto";
import "google/protobuf/duration.proto";
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3";
option java_outer_classname = "ExtProcProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
+option (xds.annotations.v3.file_status).work_in_progress = true;
// [#protodoc-title: External Processing Filter]
// External Processing Filter
@@ -88,9 +90,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
//
// The protocol itself is based on a bidirectional gRPC stream. Envoy will send the
// server
-// :ref:`ProcessingRequest `
+// :ref:`ProcessingRequest `
// messages, and the server must reply with
-// :ref:`ProcessingResponse `.
+// :ref:`ProcessingResponse `.
// [#next-free-field: 9]
message ExternalProcessor {
diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto b/api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto
similarity index 93%
rename from generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto
rename to api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto
index d085790d34ab1..c15a5569a12c6 100644
--- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto
+++ b/api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto
@@ -1,15 +1,17 @@
syntax = "proto3";
-package envoy.extensions.filters.http.ext_proc.v3alpha;
+package envoy.extensions.filters.http.ext_proc.v3;
+
+import "xds/annotations/v3/status.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3";
option java_outer_classname = "ProcessingModeProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
+option (xds.annotations.v3.file_status).work_in_progress = true;
// [#protodoc-title: External Processing Filter]
// External Processing Filter Processing Mode
diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto b/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto
deleted file mode 100644
index d085790d34ab1..0000000000000
--- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto
+++ /dev/null
@@ -1,74 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.http.ext_proc.v3alpha;
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha";
-option java_outer_classname = "ProcessingModeProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: External Processing Filter]
-// External Processing Filter Processing Mode
-// [#extension: envoy.filters.http.ext_proc]
-
-// This configuration describes which parts of an HTTP request and
-// response are sent to a remote server and how they are delivered.
-
-// [#next-free-field: 7]
-message ProcessingMode {
- // Control how headers and trailers are handled
- enum HeaderSendMode {
- // The default HeaderSendMode depends on which part of the message is being
- // processed. By default, request and response headers are sent,
- // while trailers are skipped.
- DEFAULT = 0;
-
- // Send the header or trailer.
- SEND = 1;
-
- // Do not send the header or trailer.
- SKIP = 2;
- }
-
- // Control how the request and response bodies are handled
- enum BodySendMode {
- // Do not send the body at all. This is the default.
- NONE = 0;
-
- // Stream the body to the server in pieces as they arrive at the
- // proxy.
- STREAMED = 1;
-
- // Buffer the message body in memory and send the entire body at once.
- // If the body exceeds the configured buffer limit, then the
- // downstream system will receive an error.
- BUFFERED = 2;
-
- // Buffer the message body in memory and send the entire body in one
- // chunk. If the body exceeds the configured buffer limit, then the body contents
- // up to the buffer limit will be sent.
- BUFFERED_PARTIAL = 3;
- }
-
- // How to handle the request header. Default is "SEND".
- HeaderSendMode request_header_mode = 1 [(validate.rules).enum = {defined_only: true}];
-
- // How to handle the response header. Default is "SEND".
- HeaderSendMode response_header_mode = 2 [(validate.rules).enum = {defined_only: true}];
-
- // How to handle the request body. Default is "NONE".
- BodySendMode request_body_mode = 3 [(validate.rules).enum = {defined_only: true}];
-
- // How do handle the response body. Default is "NONE".
- BodySendMode response_body_mode = 4 [(validate.rules).enum = {defined_only: true}];
-
- // How to handle the request trailers. Default is "SKIP".
- HeaderSendMode request_trailer_mode = 5 [(validate.rules).enum = {defined_only: true}];
-
- // How to handle the response trailers. Default is "SKIP".
- HeaderSendMode response_trailer_mode = 6 [(validate.rules).enum = {defined_only: true}];
-}
diff --git a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto
index a4feeff31f158..7311abe8df6f8 100644
--- a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto
+++ b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto
@@ -15,7 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// gRPC-JSON transcoder :ref:`configuration overview `.
// [#extension: envoy.filters.http.grpc_json_transcoder]
-// [#next-free-field: 12]
+// [#next-free-field: 13]
// GrpcJsonTranscoder filter configuration.
// The filter itself can be used per route / per virtual host or on the general level. The most
// specific one is being used for a given route. If the list of services is empty - filter
@@ -211,12 +211,16 @@ message GrpcJsonTranscoder {
bool convert_grpc_status = 9;
// URL unescaping policy.
- // This spec is only applied when extracting variable with multiple segments.
+ // This spec is only applied when extracting variable with multiple segments in the URL path.
// For example, in case of `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` and `z` are multiple segments.
// For a path with `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, `z=third/fourth`.
// If this setting is not specified, the value defaults to :ref:`ALL_CHARACTERS_EXCEPT_RESERVED`.
UrlUnescapeSpec url_unescape_spec = 10 [(validate.rules).enum = {defined_only: true}];
+ // If true, unescape '+' to space when extracting variables in query parameters.
+ // This is to support `HTML 2.0 `_
+ bool query_param_unescape_plus = 12;
+
// Configure the behavior when handling requests that cannot be transcoded.
//
// By default, the transcoder will silently pass through HTTP requests that are malformed.
diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
index 9718dbe0550ab..6d15956e1479e 100644
--- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
+++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
@@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// cache_duration:
// seconds: 300
//
-// [#next-free-field: 14]
+// [#next-free-field: 15]
message JwtProvider {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider";
@@ -137,6 +137,7 @@ message JwtProvider {
// If false, the JWT is removed in the request after a success verification. If true, the JWT is
// not removed in the request. Default value is false.
+ // caveat: only works for from_header & has no effect for JWTs extracted through from_params & from_cookies.
bool forward = 5;
// Two fields below define where to extract the JWT from an HTTP request.
@@ -230,6 +231,46 @@ message JwtProvider {
//
string payload_in_metadata = 9;
+ // If not empty, similar to :ref:`payload_in_metadata `,
+ // a successfully verified JWT header will be written to :ref:`Dynamic State `
+ // as an entry (``protobuf::Struct``) in **envoy.filters.http.jwt_authn** *namespace* with the
+ // value of this field as the key.
+ //
+ // For example, if ``header_in_metadata`` is *my_header*:
+ //
+ // .. code-block:: yaml
+ //
+ // envoy.filters.http.jwt_authn:
+ // my_header:
+ // alg: JWT
+ // kid: EF71iSaosbC5C4tC6Syq1Gm647M
+ // alg: PS256
+ //
+ // When the metadata has **envoy.filters.http.jwt_authn** entry already (for example if
+ // :ref:`payload_in_metadata `
+ // is not empty), it will be inserted as a new entry in the same *namespace* as shown below:
+ //
+ // .. code-block:: yaml
+ //
+ // envoy.filters.http.jwt_authn:
+ // my_payload:
+ // iss: https://example.com
+ // sub: test@example.com
+ // aud: https://example.com
+ // exp: 1501281058
+ // my_header:
+ // alg: JWT
+ // kid: EF71iSaosbC5C4tC6Syq1Gm647M
+ // alg: PS256
+ //
+ // .. warning::
+ // Using the same key name for :ref:`header_in_metadata `
+ // and :ref:`payload_in_metadata `
+ // is not suggested due to potential override of existing entry, while it is not enforced during
+ // config validation.
+ //
+ string header_in_metadata = 14;
+
// Specify the clock skew in seconds when verifying JWT time constraint,
// such as `exp`, and `nbf`. If not specified, default is 60 seconds.
uint32 clock_skew_seconds = 10;
@@ -541,7 +582,8 @@ message FilterStateRule {
// A map of string keys to requirements. The string key is the string value
// in the FilterState with the name specified in the *name* field above.
- map requires = 3;
+ map
+ requires = 3;
}
// This is the Envoy HTTP filter config for JWT authentication.
diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD b/api/envoy/extensions/filters/http/oauth2/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD
rename to api/envoy/extensions/filters/http/oauth2/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3/oauth.proto
similarity index 96%
rename from generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
rename to api/envoy/extensions/filters/http/oauth2/v3/oauth.proto
index e5f990512ca87..e88455454715d 100644
--- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
+++ b/api/envoy/extensions/filters/http/oauth2/v3/oauth.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package envoy.extensions.filters.http.oauth2.v3alpha;
+package envoy.extensions.filters.http.oauth2.v3;
import "envoy/config/core/v3/http_uri.proto";
import "envoy/config/route/v3/route_components.proto";
@@ -10,10 +10,9 @@ import "envoy/type/matcher/v3/path.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3";
option java_outer_classname = "OauthProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: OAuth]
diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
deleted file mode 100644
index e5f990512ca87..0000000000000
--- a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
+++ /dev/null
@@ -1,89 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.http.oauth2.v3alpha;
-
-import "envoy/config/core/v3/http_uri.proto";
-import "envoy/config/route/v3/route_components.proto";
-import "envoy/extensions/transport_sockets/tls/v3/secret.proto";
-import "envoy/type/matcher/v3/path.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha";
-option java_outer_classname = "OauthProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: OAuth]
-// OAuth :ref:`configuration overview `.
-// [#extension: envoy.filters.http.oauth2]
-//
-
-message OAuth2Credentials {
- // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.
- string client_id = 1 [(validate.rules).string = {min_len: 1}];
-
- // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server.
- transport_sockets.tls.v3.SdsSecretConfig token_secret = 2
- [(validate.rules).message = {required: true}];
-
- // Configures how the secret token should be created.
- oneof token_formation {
- option (validate.required) = true;
-
- // If present, the secret token will be a HMAC using the provided secret.
- transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3
- [(validate.rules).message = {required: true}];
- }
-}
-
-// OAuth config
-//
-// [#next-free-field: 11]
-message OAuth2Config {
- // Endpoint on the authorization server to retrieve the access token from.
- config.core.v3.HttpUri token_endpoint = 1;
-
- // The endpoint redirect to for authorization in response to unauthorized requests.
- string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];
-
- // Credentials used for OAuth.
- OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];
-
- // The redirect URI passed to the authorization endpoint. Supports header formatting
- // tokens. For more information, including details on header value syntax, see the
- // documentation on :ref:`custom request headers `.
- //
- // This URI should not contain any query parameters.
- string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];
-
- // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.
- type.matcher.v3.PathMatcher redirect_path_matcher = 5
- [(validate.rules).message = {required: true}];
-
- // The path to sign a user out, clearing their credential cookies.
- type.matcher.v3.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}];
-
- // Forward the OAuth token as a Bearer to upstream web service.
- bool forward_bearer_token = 7;
-
- // Any request that matches any of the provided matchers will be passed through without OAuth validation.
- repeated config.route.v3.HeaderMatcher pass_through_matcher = 8;
-
- // Optional list of OAuth scopes to be claimed in the authorization request. If not specified,
- // defaults to "user" scope.
- // OAuth RFC https://tools.ietf.org/html/rfc6749#section-3.3
- repeated string auth_scopes = 9;
-
- // Optional resource parameter for authorization request
- // RFC: https://tools.ietf.org/html/rfc8707
- repeated string resources = 10;
-}
-
-// Filter config.
-message OAuth2 {
- // Leave this empty to disable OAuth2 for a specific route, using per filter config.
- OAuth2Config config = 1;
-}
diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/BUILD b/api/envoy/extensions/filters/http/ratelimit/v3/BUILD
index 0bad14913d217..5b2bddfabb819 100644
--- a/api/envoy/extensions/filters/http/ratelimit/v3/BUILD
+++ b/api/envoy/extensions/filters/http/ratelimit/v3/BUILD
@@ -6,7 +6,10 @@ licenses(["notice"]) # Apache 2
api_proto_package(
deps = [
+ "//envoy/config/core/v3:pkg",
"//envoy/config/ratelimit/v3:pkg",
+ "//envoy/config/route/v3:pkg",
+ "//envoy/type/metadata/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
)
diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
index bc58e7f9b2e1a..53fb849361c1d 100644
--- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
+++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
@@ -2,7 +2,10 @@ syntax = "proto3";
package envoy.extensions.filters.http.ratelimit.v3;
+import "envoy/config/core/v3/extension.proto";
import "envoy/config/ratelimit/v3/rls.proto";
+import "envoy/config/route/v3/route_components.proto";
+import "envoy/type/metadata/v3/metadata.proto";
import "google/protobuf/duration.proto";
@@ -105,6 +108,214 @@ message RateLimit {
bool disable_x_envoy_ratelimited_header = 9;
}
+// Global rate limiting :ref:`architecture overview `.
+// Also applies to Local rate limiting :ref:`using descriptors `.
+// [#not-implemented-hide:]
+message RateLimitConfig {
+ // [#next-free-field: 10]
+ message Action {
+ // The following descriptor entry is appended to the descriptor:
+ //
+ // .. code-block:: cpp
+ //
+ // ("source_cluster", "")
+ //
+ // is derived from the :option:`--service-cluster` option.
+ message SourceCluster {
+ }
+
+ // The following descriptor entry is appended to the descriptor:
+ //
+ // .. code-block:: cpp
+ //
+ // ("destination_cluster", "")
+ //
+ // Once a request matches against a route table rule, a routed cluster is determined by one of
+ // the following :ref:`route table configuration `
+ // settings:
+ //
+ // * :ref:`cluster ` indicates the upstream cluster
+ // to route to.
+ // * :ref:`weighted_clusters `
+ // chooses a cluster randomly from a set of clusters with attributed weight.
+ // * :ref:`cluster_header ` indicates which
+ // header in the request contains the target cluster.
+ message DestinationCluster {
+ }
+
+ // The following descriptor entry is appended when a header contains a key that matches the
+ // *header_name*:
+ //
+ // .. code-block:: cpp
+ //
+ // ("", "")
+ message RequestHeaders {
+ // The header name to be queried from the request headers. The header’s
+ // value is used to populate the value of the descriptor entry for the
+ // descriptor_key.
+ string header_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+
+ // The key to use in the descriptor entry.
+ string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];
+
+ // If set to true, Envoy skips the descriptor while calling rate limiting service
+ // when header is not present in the request. By default it skips calling the
+ // rate limiting service if this header is not present in the request.
+ bool skip_if_absent = 3;
+ }
+
+ // The following descriptor entry is appended to the descriptor and is populated using the
+ // trusted address from :ref:`x-forwarded-for `:
+ //
+ // .. code-block:: cpp
+ //
+ // ("remote_address", "")
+ message RemoteAddress {
+ }
+
+ // The following descriptor entry is appended to the descriptor:
+ //
+ // .. code-block:: cpp
+ //
+ // ("generic_key", "")
+ message GenericKey {
+ // The value to use in the descriptor entry.
+ string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
+
+ // An optional key to use in the descriptor entry. If not set it defaults
+ // to 'generic_key' as the descriptor key.
+ string descriptor_key = 2;
+ }
+
+ // The following descriptor entry is appended to the descriptor:
+ //
+ // .. code-block:: cpp
+ //
+ // ("header_match", "")
+ message HeaderValueMatch {
+ // The value to use in the descriptor entry.
+ string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
+
+ // If set to true, the action will append a descriptor entry when the
+ // request matches the headers. If set to false, the action will append a
+ // descriptor entry when the request does not match the headers. The
+ // default value is true.
+ bool expect_match = 2;
+
+ // Specifies a set of headers that the rate limit action should match
+ // on. The action will check the request’s headers against all the
+ // specified headers in the config. A match will happen if all the
+ // headers in the config are present in the request with the same values
+ // (or based on presence if the value field is not in the config).
+ repeated config.route.v3.HeaderMatcher headers = 3
+ [(validate.rules).repeated = {min_items: 1}];
+ }
+
+ // The following descriptor entry is appended when the metadata contains a key value:
+ //
+ // .. code-block:: cpp
+ //
+ // ("", "")
+ message MetaData {
+ enum Source {
+ // Query :ref:`dynamic metadata `
+ DYNAMIC = 0;
+
+ // Query :ref:`route entry metadata `
+ ROUTE_ENTRY = 1;
+ }
+
+ // The key to use in the descriptor entry.
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];
+
+ // Metadata struct that defines the key and path to retrieve the string value. A match will
+ // only happen if the value in the metadata is of type string.
+ type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];
+
+ // An optional value to use if *metadata_key* is empty. If not set and
+ // no value is present under the metadata_key then no descriptor is generated.
+ string default_value = 3;
+
+ // Source of metadata
+ Source source = 4 [(validate.rules).enum = {defined_only: true}];
+ }
+
+ oneof action_specifier {
+ option (validate.required) = true;
+
+ // Rate limit on source cluster.
+ SourceCluster source_cluster = 1;
+
+ // Rate limit on destination cluster.
+ DestinationCluster destination_cluster = 2;
+
+ // Rate limit on request headers.
+ RequestHeaders request_headers = 3;
+
+ // Rate limit on remote address.
+ RemoteAddress remote_address = 4;
+
+ // Rate limit on a generic key.
+ GenericKey generic_key = 5;
+
+ // Rate limit on the existence of request headers.
+ HeaderValueMatch header_value_match = 6;
+
+ // Rate limit on metadata.
+ MetaData metadata = 8;
+
+ // Rate limit descriptor extension. See the rate limit descriptor extensions documentation.
+ // [#extension-category: envoy.rate_limit_descriptors]
+ config.core.v3.TypedExtensionConfig extension = 9;
+ }
+ }
+
+ message Override {
+ // Fetches the override from the dynamic metadata.
+ message DynamicMetadata {
+ // Metadata struct that defines the key and path to retrieve the struct value.
+ // The value must be a struct containing an integer "requests_per_unit" property
+ // and a "unit" property with a value parseable to :ref:`RateLimitUnit
+ // enum `
+ type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];
+ }
+
+ oneof override_specifier {
+ option (validate.required) = true;
+
+ // Limit override from dynamic metadata.
+ DynamicMetadata dynamic_metadata = 1;
+ }
+ }
+
+ // Refers to the stage set in the filter. The rate limit configuration only
+ // applies to filters with the same stage number. The default stage number is
+ // 0.
+ //
+ // .. note::
+ //
+ // The filter supports a range of 0 - 10 inclusively for stage numbers.
+ uint32 stage = 1 [(validate.rules).uint32 = {lte: 10}];
+
+ // The key to be set in runtime to disable this rate limit configuration.
+ string disable_key = 2;
+
+ // A list of actions that are to be applied for this rate limit configuration.
+ // Order matters as the actions are processed sequentially and the descriptor
+ // is composed by appending descriptor entries in that sequence. If an action
+ // cannot append a descriptor entry, no descriptor is generated for the
+ // configuration. See :ref:`composing actions
+ // ` for additional documentation.
+ repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];
+
+ // An optional limit override to be appended to the descriptor produced by this
+ // rate limit configuration. If the override value is invalid or cannot be resolved
+ // from metadata, no override is provided. See :ref:`rate limit override
+ // ` for more information.
+ Override limit = 4;
+}
+
message RateLimitPerRoute {
enum VhRateLimitsOptions {
// Use the virtual host rate limits unless the route has a rate limit policy.
@@ -117,6 +328,32 @@ message RateLimitPerRoute {
IGNORE = 2;
}
+ // The override option determines how the filter handles the cases where there is an override config at a more specific level than this one (from least to most specific: virtual host, route, cluster weight).
+ // [#not-implemented-hide:]
+ enum OverrideOptions {
+ // Client-defined default, typically OVERRIDE_POLICY. If VhRateLimitsOptions is set, that will be used instead.
+ DEFAULT = 0;
+
+ // If there is an override config at a more specific level, use that instead of this one.
+ OVERRIDE_POLICY = 1;
+
+ // If there is an override config at a more specific level, use data from both.
+ INCLUDE_POLICY = 2;
+
+ // If there is an override config at a more specific level, ignore it and use only this one.
+ IGNORE_POLICY = 3;
+ }
+
// Specifies if the rate limit filter should include the virtual host rate limits.
VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}];
+
+ // Specifies if the rate limit filter should include the lower levels (route level, virtual host level or cluster weight level) rate limits override options.
+ // [#not-implemented-hide:]
+ OverrideOptions override_option = 2 [(validate.rules).enum = {defined_only: true}];
+
+ // Rate limit configuration. If not set, uses the
+ // :ref:`VirtualHost.rate_limits` or
+ // :ref:`RouteAction.rate_limits` fields instead.
+ // [#not-implemented-hide:]
+ repeated RateLimitConfig rate_limits = 3;
}
diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD
rename to api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto
similarity index 92%
rename from generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto
rename to api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto
index 7f7eb57d5be64..a084b0682b672 100644
--- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto
+++ b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto
@@ -1,16 +1,15 @@
syntax = "proto3";
-package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha;
+package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3;
import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3";
option java_outer_classname = "SniDynamicForwardProxyProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: SNI dynamic forward proxy]
diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto
deleted file mode 100644
index 7f7eb57d5be64..0000000000000
--- a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha;
-
-import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha";
-option java_outer_classname = "SniDynamicForwardProxyProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: SNI dynamic forward proxy]
-
-// Configuration for the SNI-based dynamic forward proxy filter. See the
-// :ref:`architecture overview ` for
-// more information. Note this filter must be configured along with
-// :ref:`TLS inspector listener filter `
-// to work.
-// [#extension: envoy.filters.network.sni_dynamic_forward_proxy]
-message FilterConfig {
- // The DNS cache configuration that the filter will attach to. Note this
- // configuration must match that of associated :ref:`dynamic forward proxy
- // cluster configuration
- // `.
- common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1
- [(validate.rules).message = {required: true}];
-
- oneof port_specifier {
- // The port number to connect to the upstream.
- uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}];
- }
-}
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto
index 4916330ec5f3a..01c41c77bb2b5 100644
--- a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto
+++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto
@@ -85,8 +85,8 @@ message ThriftProxy {
repeated ThriftFilter thrift_filters = 5;
// If set to true, Envoy will try to skip decode data after metadata in the Thrift message.
- // This mode will only work if the upstream and downstream protocols are the same and the transport
- // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will
+ // This mode will only work if the upstream and downstream protocols are the same and the transports
+ // are Framed or Header, and the protocol is not Twitter. Otherwise Envoy will
// fallback to decode the data.
bool payload_passthrough = 6;
diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v3/BUILD
similarity index 100%
rename from api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD
rename to api/envoy/extensions/filters/udp/dns_filter/v3/BUILD
diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto
similarity index 96%
rename from api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto
rename to api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto
index 39f44724c430f..63542bdadc7fd 100644
--- a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto
+++ b/api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package envoy.extensions.filters.udp.dns_filter.v3alpha;
+package envoy.extensions.filters.udp.dns_filter.v3;
import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";
@@ -13,10 +13,9 @@ import "envoy/annotations/deprecation.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3";
option java_outer_classname = "DnsFilterProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: DNS Filter]
diff --git a/api/envoy/extensions/key_value/file_based/v3/BUILD b/api/envoy/extensions/key_value/file_based/v3/BUILD
index ee92fb652582e..ec1e778e06e5c 100644
--- a/api/envoy/extensions/key_value/file_based/v3/BUILD
+++ b/api/envoy/extensions/key_value/file_based/v3/BUILD
@@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
licenses(["notice"]) # Apache 2
api_proto_package(
- deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+ deps = [
+ "@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
+ ],
)
diff --git a/api/envoy/extensions/key_value/file_based/v3/config.proto b/api/envoy/extensions/key_value/file_based/v3/config.proto
index 0eff4feb8f941..82aa94f8cb648 100644
--- a/api/envoy/extensions/key_value/file_based/v3/config.proto
+++ b/api/envoy/extensions/key_value/file_based/v3/config.proto
@@ -4,6 +4,8 @@ package envoy.extensions.key_value.file_based.v3;
import "google/protobuf/duration.proto";
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
import "validate/validate.proto";
@@ -14,10 +16,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: File Based Key Value Store storage plugin]
-// [#alpha:]
// [#extension: envoy.key_value.file_based]
// This is configuration to flush a key value store out to disk.
message FileBasedKeyValueStoreConfig {
+ option (xds.annotations.v3.message_status).work_in_progress = true;
+
// The filename to read the keys and values from, and write the keys and
// values to.
string filename = 1 [(validate.rules).string = {min_len: 1}];
diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD b/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/BUILD
similarity index 100%
rename from generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD
rename to api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/BUILD
diff --git a/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto b/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto
new file mode 100644
index 0000000000000..a4bdc73fa81a0
--- /dev/null
+++ b/api/envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto
@@ -0,0 +1,35 @@
+syntax = "proto3";
+
+package envoy.extensions.rbac.matchers.upstream_ip_port.v3;
+
+import "envoy/config/core/v3/address.proto";
+import "envoy/type/v3/range.proto";
+
+import "udpa/annotations/status.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.rbac.matchers.upstream_ip_port.v3";
+option java_outer_classname = "UpstreamIpPortMatcherProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: RBAC upstream IP and port matcher plugin]
+// [#extension: envoy.rbac.matchers.upstream_ip_port]
+
+// This is configuration for matching upstream ip and port.
+// Note that although both fields are optional, at least one of IP or port must be supplied. If only
+// one is supplied the other is a wildcard match.
+// This matcher requires a filter in the chain to have saved the upstream address in the
+// filter state before the matcher is executed by RBAC filter. The state should be saved with key
+// `envoy.stream.upstream_address` (See
+// :repo:`upstream_address.h`).
+// Also, See :repo:`proxy_filter.cc<
+// source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc>` for an example of a
+// filter which populates the FilterState.
+message UpstreamIpPortMatcher {
+ // A CIDR block that will be used to match the upstream IP.
+ // Both Ipv4 and Ipv6 ranges can be matched.
+ config.core.v3.CidrRange upstream_ip = 1;
+
+ // A port range that will be used to match the upstream port.
+ type.v3.Int64Range upstream_port_range = 2;
+}
diff --git a/api/envoy/watchdog/v3alpha/BUILD b/api/envoy/extensions/transport_sockets/s2a/v3/BUILD
similarity index 100%
rename from api/envoy/watchdog/v3alpha/BUILD
rename to api/envoy/extensions/transport_sockets/s2a/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto b/api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto
similarity index 83%
rename from generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto
rename to api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto
index b32b84653e690..7c77222f59d63 100644
--- a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto
+++ b/api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto
@@ -1,14 +1,13 @@
syntax = "proto3";
-package envoy.extensions.transport_sockets.s2a.v3alpha;
+package envoy.extensions.transport_sockets.s2a.v3;
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3";
option java_outer_classname = "S2aProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#not-implemented-hide:]
diff --git a/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto b/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto
deleted file mode 100644
index b32b84653e690..0000000000000
--- a/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.transport_sockets.s2a.v3alpha;
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3alpha";
-option java_outer_classname = "S2aProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#not-implemented-hide:]
-// Configuration for S2A transport socket. This allows Envoy clients to
-// configure how to offload mTLS handshakes to the S2A service.
-// https://github.com/google/s2a-core#readme
-message S2AConfiguration {
- // The address of the S2A. This can be an IP address or a hostname,
- // followed by a port number.
- string s2a_address = 1 [(validate.rules).string = {min_len: 1}];
-}
diff --git a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto
index 271dcfbe49cec..1267488d98c6a 100644
--- a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto
+++ b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto
@@ -118,12 +118,13 @@ message HttpProtocolOptions {
// is alpha is not guaranteed to be API-stable.
config.core.v3.Http3ProtocolOptions http3_protocol_options = 3;
- // [#not-implemented-hide:]
// The presence of alternate protocols cache options causes the use of the
// alternate protocols cache, which is responsible for parsing and caching
// HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that
// advertise supporting it.
- // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled.
+ //
+ // .. note::
+ // This is required when HTTP/3 is enabled.
config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4;
}
diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD b/api/envoy/extensions/watchdog/profile_action/v3/BUILD
similarity index 100%
rename from generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD
rename to api/envoy/extensions/watchdog/profile_action/v3/BUILD
diff --git a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto
similarity index 87%
rename from generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
rename to api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto
index d73f0b5dfb9c5..07c3907fbd61a 100644
--- a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
+++ b/api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto
@@ -1,16 +1,15 @@
syntax = "proto3";
-package envoy.extensions.watchdog.profile_action.v3alpha;
+package envoy.extensions.watchdog.profile_action.v3;
import "google/protobuf/duration.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha";
+option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3";
option java_outer_classname = "ProfileActionProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Watchdog Action that does CPU profiling.]
diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
deleted file mode 100644
index d73f0b5dfb9c5..0000000000000
--- a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
+++ /dev/null
@@ -1,31 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.watchdog.profile_action.v3alpha;
-
-import "google/protobuf/duration.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha";
-option java_outer_classname = "ProfileActionProto";
-option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: Watchdog Action that does CPU profiling.]
-// [#extension: envoy.watchdog.profile_action]
-
-// Configuration for the profile watchdog action.
-message ProfileActionConfig {
- // How long the profile should last. If not set defaults to 5 seconds.
- google.protobuf.Duration profile_duration = 1;
-
- // File path to the directory to output profiles.
- string profile_path = 2 [(validate.rules).string = {min_len: 1}];
-
- // Limits the max number of profiles that can be generated by this action
- // over its lifetime to avoid filling the disk.
- // If not set (i.e. it's 0), a default of 10 will be used.
- uint64 max_profiles = 3;
-}
diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto
index b627fcb314751..11fc057da888a 100644
--- a/api/envoy/service/auth/v3/external_auth.proto
+++ b/api/envoy/service/auth/v3/external_auth.proto
@@ -12,7 +12,6 @@ import "google/rpc/status.proto";
import "envoy/annotations/deprecation.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
-import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.service.auth.v3";
option java_outer_classname = "ExternalAuthProto";
@@ -46,9 +45,9 @@ message DeniedHttpResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v2.DeniedHttpResponse";
- // This field allows the authorization service to send a HTTP response status
- // code to the downstream client other than 403 (Forbidden).
- type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];
+ // This field allows the authorization service to send an HTTP response status code to the
+ // downstream client. If not set, Envoy sends ``403 Forbidden`` HTTP status code by default.
+ type.v3.HttpStatus status = 1;
// This field allows the authorization service to send HTTP response headers
// to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to
@@ -61,7 +60,7 @@ message DeniedHttpResponse {
}
// HTTP attributes for an OK response.
-// [#next-free-field: 7]
+// [#next-free-field: 9]
message OkHttpResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v2.OkHttpResponse";
@@ -103,6 +102,15 @@ message OkHttpResponse {
// to the downstream client on success. Note that the :ref:`append field in HeaderValueOption `
// defaults to false when used in this message.
repeated config.core.v3.HeaderValueOption response_headers_to_add = 6;
+
+ // This field allows the authorization service to set (and overwrite) query
+ // string parameters on the original request before it is sent upstream.
+ repeated config.core.v3.QueryParameter query_parameters_to_set = 7;
+
+ // This field allows the authorization service to specify which query parameters
+ // should be removed from the original request before it is sent upstream. Each
+ // element in this list is a case-sensitive query parameter name to be removed.
+ repeated string query_parameters_to_remove = 8;
}
// Intended for gRPC and Network Authorization servers `only`.
@@ -110,7 +118,9 @@ message CheckResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v2.CheckResponse";
- // Status `OK` allows the request. Any other status indicates the request should be denied.
+ // Status `OK` allows the request. Any other status indicates the request should be denied, and
+ // for HTTP filter, if not overridden by :ref:`denied HTTP response status `
+ // Envoy sends ``403 Forbidden`` HTTP status code by default.
google.rpc.Status status = 1;
// An message that contains HTTP response attributes. This message is
diff --git a/generated_api_shadow/envoy/service/auth/v3/BUILD b/api/envoy/service/ext_proc/v3/BUILD
similarity index 76%
rename from generated_api_shadow/envoy/service/auth/v3/BUILD
rename to api/envoy/service/ext_proc/v3/BUILD
index 0774dda23e421..d4506b16ed5d2 100644
--- a/generated_api_shadow/envoy/service/auth/v3/BUILD
+++ b/api/envoy/service/ext_proc/v3/BUILD
@@ -7,10 +7,10 @@ licenses(["notice"]) # Apache 2
api_proto_package(
has_services = True,
deps = [
- "//envoy/annotations:pkg",
"//envoy/config/core/v3:pkg",
- "//envoy/service/auth/v2:pkg",
+ "//envoy/extensions/filters/http/ext_proc/v3:pkg",
"//envoy/type/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
+ "@com_github_cncf_udpa//xds/annotations/v3:pkg",
],
)
diff --git a/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto b/api/envoy/service/ext_proc/v3/external_processor.proto
similarity index 97%
rename from generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto
rename to api/envoy/service/ext_proc/v3/external_processor.proto
index 09572331aa42a..dc6b527d5bcc9 100644
--- a/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto
+++ b/api/envoy/service/ext_proc/v3/external_processor.proto
@@ -1,22 +1,24 @@
syntax = "proto3";
-package envoy.service.ext_proc.v3alpha;
+package envoy.service.ext_proc.v3;
import "envoy/config/core/v3/base.proto";
-import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto";
+import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto";
import "envoy/type/v3/http_status.proto";
import "google/protobuf/struct.proto";
+import "xds/annotations/v3/status.proto";
+
import "udpa/annotations/status.proto";
import "validate/validate.proto";
-option java_package = "io.envoyproxy.envoy.service.ext_proc.v3alpha";
+option java_package = "io.envoyproxy.envoy.service.ext_proc.v3";
option java_outer_classname = "ExternalProcessorProto";
option java_multiple_files = true;
option java_generic_services = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
+option (xds.annotations.v3.file_status).work_in_progress = true;
// [#protodoc-title: External Processing Service]
@@ -167,7 +169,7 @@ message ProcessingResponse {
// for the duration of this particular request/response only. Servers
// may use this to intelligently control how requests are processed
// based on the headers and other metadata that they see.
- envoy.extensions.filters.http.ext_proc.v3alpha.ProcessingMode mode_override = 9;
+ envoy.extensions.filters.http.ext_proc.v3.ProcessingMode mode_override = 9;
}
// The following are messages that are sent to the server.
diff --git a/api/envoy/service/ext_proc/v3alpha/BUILD b/api/envoy/service/ext_proc/v3alpha/BUILD
deleted file mode 100644
index 4f3730e2af32e..0000000000000
--- a/api/envoy/service/ext_proc/v3alpha/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
-
-load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
-
-licenses(["notice"]) # Apache 2
-
-api_proto_package(
- has_services = True,
- deps = [
- "//envoy/config/core/v3:pkg",
- "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg",
- "//envoy/type/v3:pkg",
- "@com_github_cncf_udpa//udpa/annotations:pkg",
- ],
-)
diff --git a/api/envoy/service/ext_proc/v3alpha/external_processor.proto b/api/envoy/service/ext_proc/v3alpha/external_processor.proto
deleted file mode 100644
index 09572331aa42a..0000000000000
--- a/api/envoy/service/ext_proc/v3alpha/external_processor.proto
+++ /dev/null
@@ -1,331 +0,0 @@
-syntax = "proto3";
-
-package envoy.service.ext_proc.v3alpha;
-
-import "envoy/config/core/v3/base.proto";
-import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto";
-import "envoy/type/v3/http_status.proto";
-
-import "google/protobuf/struct.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.service.ext_proc.v3alpha";
-option java_outer_classname = "ExternalProcessorProto";
-option java_multiple_files = true;
-option java_generic_services = true;
-option (udpa.annotations.file_status).work_in_progress = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: External Processing Service]
-
-// A service that can access and modify HTTP requests and responses
-// as part of a filter chain.
-// The overall external processing protocol works like this:
-//
-// 1. Envoy sends to the service information about the HTTP request.
-// 2. The service sends back a ProcessingResponse message that directs Envoy
-// to either stop processing, continue without it, or send it the
-// next chunk of the message body.
-// 3. If so requested, Envoy sends the server chunks of the message body,
-// or the entire body at once. In either case, the server sends back
-// a ProcessingResponse after each message it receives.
-// 4. If so requested, Envoy sends the server the HTTP trailers,
-// and the server sends back a ProcessingResponse.
-// 5. At this point, request processing is done, and we pick up again
-// at step 1 when Envoy receives a response from the upstream server.
-// 6. At any point above, if the server closes the gRPC stream cleanly,
-// then Envoy proceeds without consulting the server.
-// 7. At any point above, if the server closes the gRPC stream with an error,
-// then Envoy returns a 500 error to the client, unless the filter
-// was configured to ignore errors.
-//
-// In other words, the process is a request/response conversation, but
-// using a gRPC stream to make it easier for the server to
-// maintain state.
-
-service ExternalProcessor {
- // This begins the bidirectional stream that Envoy will use to
- // give the server control over what the filter does. The actual
- // protocol is described by the ProcessingRequest and ProcessingResponse
- // messages below.
- rpc Process(stream ProcessingRequest) returns (stream ProcessingResponse) {
- }
-}
-
-// This represents the different types of messages that Envoy can send
-// to an external processing server.
-// [#next-free-field: 8]
-message ProcessingRequest {
- // Specify whether the filter that sent this request is running in synchronous
- // or asynchronous mode. The choice of synchronous or asynchronous mode
- // can be set in the filter configuration, and defaults to false.
- //
- // * A value of "false" indicates that the server must respond
- // to this message by either sending back a matching ProcessingResponse message,
- // or by closing the stream.
- // * A value of "true" indicates that the server must not respond to this
- // message, although it may still close the stream to indicate that no more messages
- // are needed.
- //
- bool async_mode = 1;
-
- // Each request message will include one of the following sub-messages. Which
- // ones are set for a particular HTTP request/response depend on the
- // processing mode.
- oneof request {
- option (validate.required) = true;
-
- // Information about the HTTP request headers, as well as peer info and additional
- // properties. Unless "async_mode" is true, the server must send back a
- // HeaderResponse message, an ImmediateResponse message, or close the stream.
- HttpHeaders request_headers = 2;
-
- // Information about the HTTP response headers, as well as peer info and additional
- // properties. Unless "async_mode" is true, the server must send back a
- // HeaderResponse message or close the stream.
- HttpHeaders response_headers = 3;
-
- // A chunk of the HTTP request body. Unless "async_mode" is true, the server must send back
- // a BodyResponse message, an ImmediateResponse message, or close the stream.
- HttpBody request_body = 4;
-
- // A chunk of the HTTP request body. Unless "async_mode" is true, the server must send back
- // a BodyResponse message or close the stream.
- HttpBody response_body = 5;
-
- // The HTTP trailers for the request path. Unless "async_mode" is true, the server
- // must send back a TrailerResponse message or close the stream.
- //
- // This message is only sent if the trailers processing mode is set to "SEND".
- // If there are no trailers on the original downstream request, then this message
- // will only be sent (with empty trailers waiting to be populated) if the
- // processing mode is set before the request headers are sent, such as
- // in the filter configuration.
- HttpTrailers request_trailers = 6;
-
- // The HTTP trailers for the response path. Unless "async_mode" is true, the server
- // must send back a TrailerResponse message or close the stream.
- //
- // This message is only sent if the trailers processing mode is set to "SEND".
- // If there are no trailers on the original downstream request, then this message
- // will only be sent (with empty trailers waiting to be populated) if the
- // processing mode is set before the request headers are sent, such as
- // in the filter configuration.
- HttpTrailers response_trailers = 7;
- }
-}
-
-// For every ProcessingRequest received by the server with the "async_mode" field
-// set to false, the server must send back exactly one ProcessingResponse message.
-// [#next-free-field: 10]
-message ProcessingResponse {
- oneof response {
- option (validate.required) = true;
-
- // The server must send back this message in response to a message with the
- // "request_headers" field set.
- HeadersResponse request_headers = 1;
-
- // The server must send back this message in response to a message with the
- // "response_headers" field set.
- HeadersResponse response_headers = 2;
-
- // The server must send back this message in response to a message with
- // the "request_body" field set.
- BodyResponse request_body = 3;
-
- // The server must send back this message in response to a message with
- // the "response_body" field set.
- BodyResponse response_body = 4;
-
- // The server must send back this message in response to a message with
- // the "request_trailers" field set.
- TrailersResponse request_trailers = 5;
-
- // The server must send back this message in response to a message with
- // the "response_trailers" field set.
- TrailersResponse response_trailers = 6;
-
- // If specified, attempt to create a locally generated response, send it
- // downstream, and stop processing additional filters and ignore any
- // additional messages received from the remote server for this request or
- // response. If a response has already started -- for example, if this
- // message is sent response to a "response_body" message -- then
- // this will either ship the reply directly to the downstream codec,
- // or reset the stream.
- ImmediateResponse immediate_response = 7;
- }
-
- // [#not-implemented-hide:]
- // Optional metadata that will be emitted as dynamic metadata to be consumed by the next
- // filter. This metadata will be placed in the namespace "envoy.filters.http.ext_proc".
- google.protobuf.Struct dynamic_metadata = 8;
-
- // Override how parts of the HTTP request and response are processed
- // for the duration of this particular request/response only. Servers
- // may use this to intelligently control how requests are processed
- // based on the headers and other metadata that they see.
- envoy.extensions.filters.http.ext_proc.v3alpha.ProcessingMode mode_override = 9;
-}
-
-// The following are messages that are sent to the server.
-
-// This message is sent to the external server when the HTTP request and responses
-// are first received.
-message HttpHeaders {
- // The HTTP request headers. All header keys will be
- // lower-cased, because HTTP header keys are case-insensitive.
- config.core.v3.HeaderMap headers = 1;
-
- // [#not-implemented-hide:]
- // The values of properties selected by the "request_attributes"
- // or "response_attributes" list in the configuration. Each entry
- // in the list is populated
- // from the standard :ref:`attributes `
- // supported across Envoy.
- map attributes = 2;
-
- // If true, then there is no message body associated with this
- // request or response.
- bool end_of_stream = 3;
-}
-
-// This message contains the message body that Envoy sends to the external server.
-message HttpBody {
- bytes body = 1;
-
- bool end_of_stream = 2;
-}
-
-// This message contains the trailers.
-message HttpTrailers {
- config.core.v3.HeaderMap trailers = 1;
-}
-
-// The following are messages that may be sent back by the server.
-
-// This message must be sent in response to an HttpHeaders message.
-message HeadersResponse {
- CommonResponse response = 1;
-}
-
-// This message must be sent in response to an HttpTrailers message.
-message TrailersResponse {
- // Instructions on how to manipulate the trailers
- HeaderMutation header_mutation = 1;
-}
-
-// This message must be sent in response to an HttpBody message.
-message BodyResponse {
- CommonResponse response = 1;
-}
-
-// This message contains common fields between header and body responses.
-// [#next-free-field: 6]
-message CommonResponse {
- enum ResponseStatus {
- // Apply the mutation instructions in this message to the
- // request or response, and then continue processing the filter
- // stream as normal. This is the default.
- CONTINUE = 0;
-
- // Apply the specified header mutation, replace the body with the body
- // specified in the body mutation (if present), and do not send any
- // further messages for this request or response even if the processing
- // mode is configured to do so.
- //
- // When used in response to a request_headers or response_headers message,
- // this status makes it possible to either completely replace the body
- // while discarding the original body, or to add a body to a message that
- // formerly did not have one.
- //
- // In other words, this response makes it possible to turn an HTTP GET
- // into a POST, PUT, or PATCH.
- CONTINUE_AND_REPLACE = 1;
- }
-
- // If set, provide additional direction on how the Envoy proxy should
- // handle the rest of the HTTP filter chain.
- ResponseStatus status = 1 [(validate.rules).enum = {defined_only: true}];
-
- // Instructions on how to manipulate the headers. When responding to an
- // HttpBody request, header mutations will only take effect if
- // the current processing mode for the body is BUFFERED.
- HeaderMutation header_mutation = 2;
-
- // Replace the body of the last message sent to the remote server on this
- // stream. If responding to an HttpBody request, simply replace or clear
- // the body chunk that was sent with that request. Body mutations only take
- // effect in response to "body" messages and are ignored otherwise.
- BodyMutation body_mutation = 3;
-
- // [#not-implemented-hide:]
- // Add new trailers to the message. This may be used when responding to either a
- // HttpHeaders or HttpBody message, but only if this message is returned
- // along with the CONTINUE_AND_REPLACE status.
- config.core.v3.HeaderMap trailers = 4;
-
- // Clear the route cache for the current request.
- // This is necessary if the remote server
- // modified headers that are used to calculate the route.
- bool clear_route_cache = 5;
-}
-
-// This message causes the filter to attempt to create a locally
-// generated response, send it downstream, stop processing
-// additional filters, and ignore any additional messages received
-// from the remote server for this request or response. If a response
-// has already started, then this will either ship the reply directly
-// to the downstream codec, or reset the stream.
-// [#next-free-field: 6]
-message ImmediateResponse {
- // The response code to return
- type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];
-
- // Apply changes to the default headers, which will include content-type.
- HeaderMutation headers = 2;
-
- // The message body to return with the response which is sent using the
- // text/plain content type, or encoded in the grpc-message header.
- string body = 3;
-
- // If set, then include a gRPC status trailer.
- GrpcStatus grpc_status = 4;
-
- // A string detailing why this local reply was sent, which may be included
- // in log and debug output.
- string details = 5;
-}
-
-// This message specifies a gRPC status for an ImmediateResponse message.
-message GrpcStatus {
- // The actual gRPC status
- uint32 status = 1;
-}
-
-// Change HTTP headers or trailers by appending, replacing, or removing
-// headers.
-message HeaderMutation {
- // Add or replace HTTP headers. Attempts to set the value of
- // any "x-envoy" header, and attempts to set the ":method",
- // ":authority", ":scheme", or "host" headers will be ignored.
- repeated config.core.v3.HeaderValueOption set_headers = 1;
-
- // Remove these HTTP headers. Attempts to remove system headers --
- // any header starting with ":", plus "host" -- will be ignored.
- repeated string remove_headers = 2;
-}
-
-// Replace the entire message body chunk received in the corresponding
-// HttpBody message with this new body, or clear the body.
-message BodyMutation {
- oneof mutation {
- // The entire body to replace
- bytes body = 1;
-
- // Clear the corresponding body chunk
- bool clear_body = 2;
- }
-}
diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto
index ab8e0ffc0eba7..113998c4082de 100644
--- a/api/envoy/service/ratelimit/v3/rls.proto
+++ b/api/envoy/service/ratelimit/v3/rls.proto
@@ -53,7 +53,7 @@ message RateLimitRequest {
}
// A response from a ShouldRateLimit call.
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message RateLimitResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.ratelimit.v2.RateLimitResponse";
@@ -103,8 +103,15 @@ message RateLimitResponse {
Unit unit = 2;
}
- // Cacheable quota for responses, see documentation for the :ref:`quota
- // ` field.
+ // Cacheable quota for responses.
+ // Quota can be granted at different levels: either for each individual descriptor or for the whole descriptor set.
+ // This is a certain number of requests over a period of time.
+ // The client may cache this result and apply the effective RateLimitResponse to future matching
+ // requests without querying rate limit service.
+ //
+ // When quota expires due to timeout, a new RLS request will also be made.
+ // The implementation may choose to preemptively query the rate limit server for more quota on or
+ // before expiration or before the available quota runs out.
// [#not-implemented-hide:]
message Quota {
// Number of matching requests granted in quota. Must be 1 or more.
@@ -114,6 +121,15 @@ message RateLimitResponse {
// Point in time at which the quota expires.
google.protobuf.Timestamp valid_until = 2;
}
+
+ // The unique id that is associated with each Quota either at individual descriptor level or whole descriptor set level.
+ //
+  // For a matching policy with boolean logic, for example, match: "request.headers['environment'] == 'staging' || request.headers['environment'] == 'dev'",
+  // the request_headers action produces a distinct list of descriptors for each possible value of the 'environment' header even though the granted quota is the same.
+  // Thus, the client will use this id information (returned from the RLS server) to correctly correlate the multiple descriptors/descriptor sets that have been granted with the same quota (i.e., share the same quota among multiple descriptors or descriptor sets).
+ //
+ // If id is empty, this id field will be ignored. If quota for the same id changes (e.g. due to configuration update), the old quota will be overridden by the new one. Shared quotas referenced by ID will still adhere to expiration after `valid_until`.
+ string id = 3;
}
// [#next-free-field: 6]
@@ -133,12 +149,9 @@ message RateLimitResponse {
// Duration until reset of the current limit window.
google.protobuf.Duration duration_until_reset = 4;
- // Quota granted for the descriptor. This is a certain number of requests over a period of time.
- // The client may cache this result and apply the effective RateLimitResponse to future matching
- // requests containing a matching descriptor without querying rate limit service.
- //
// Quota is available for a request if its descriptor set has cached quota available for all
// descriptors.
+ // This is for each individual descriptor in the descriptor set. The client will perform matches for each individual descriptor against available per-descriptor quota.
//
// If quota is available, a RLS request will not be made and the quota will be reduced by 1 for
// all matching descriptors.
@@ -159,10 +172,6 @@ message RateLimitResponse {
// If the server did not provide a quota, such as the quota message is empty for some of
// the descriptors, then the request admission is determined by the
// :ref:`overall_code `.
- //
- // When quota expires due to timeout, a new RLS request will also be made.
- // The implementation may choose to preemptively query the rate limit server for more quota on or
- // before expiration or before the available quota runs out.
// [#not-implemented-hide:]
Quota quota = 5;
}
@@ -193,4 +202,17 @@ message RateLimitResponse {
// - :ref:`envoy.filters.network.ratelimit ` for network filter.
// - :ref:`envoy.filters.thrift.rate_limit ` for Thrift filter.
google.protobuf.Struct dynamic_metadata = 6;
+
+ // Quota is available for a request if its entire descriptor set has cached quota available.
+ // This is a union of all descriptors in the descriptor set. Clients can use the quota for future matches if and only if the descriptor set matches what was sent in the request that originated this response.
+ //
+ // If quota is available, a RLS request will not be made and the quota will be reduced by 1.
+ // If quota is not available (i.e., a cached entry doesn't exist for a RLS descriptor set), a RLS request will be triggered.
+  // If the server did not provide a quota, such as when the quota message is empty, then the request admission is determined by the
+ // :ref:`overall_code `.
+ //
+  // If there is not sufficient quota and the cached entry for a RLS descriptor set is out-of-quota but not expired,
+ // the request will be treated as OVER_LIMIT.
+ // [#not-implemented-hide:]
+ Quota quota = 7;
}
diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD b/api/envoy/watchdog/v3/BUILD
similarity index 100%
rename from generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD
rename to api/envoy/watchdog/v3/BUILD
diff --git a/api/envoy/watchdog/v3alpha/README.md b/api/envoy/watchdog/v3/README.md
similarity index 100%
rename from api/envoy/watchdog/v3alpha/README.md
rename to api/envoy/watchdog/v3/README.md
diff --git a/api/envoy/watchdog/v3alpha/abort_action.proto b/api/envoy/watchdog/v3/abort_action.proto
similarity index 85%
rename from api/envoy/watchdog/v3alpha/abort_action.proto
rename to api/envoy/watchdog/v3/abort_action.proto
index d6f34aa892cdb..325c3d3dc7a85 100644
--- a/api/envoy/watchdog/v3alpha/abort_action.proto
+++ b/api/envoy/watchdog/v3/abort_action.proto
@@ -1,15 +1,14 @@
syntax = "proto3";
-package envoy.watchdog.v3alpha;
+package envoy.watchdog.v3;
import "google/protobuf/duration.proto";
import "udpa/annotations/status.proto";
-option java_package = "io.envoyproxy.envoy.watchdog.v3alpha";
+option java_package = "io.envoyproxy.envoy.watchdog.v3";
option java_outer_classname = "AbortActionProto";
option java_multiple_files = true;
-option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.]
diff --git a/api/tools/generate_listeners_test.py b/api/tools/generate_listeners_test.py
index f67ef4bbb5aab..1defb3f666986 100644
--- a/api/tools/generate_listeners_test.py
+++ b/api/tools/generate_listeners_test.py
@@ -5,7 +5,7 @@
import generate_listeners
if __name__ == "__main__":
- srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api_canonical')
+ srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api')
generate_listeners.generate_listeners(
os.path.join(srcdir, "examples/service_envoy/listeners.pb"), "/dev/stdout", "/dev/stdout",
iter([os.path.join(srcdir, "examples/service_envoy/http_connection_manager.pb")]))
diff --git a/api/tools/tap2pcap.py b/api/tools/tap2pcap.py
index 93a8610399285..bcb13fdf9a093 100644
--- a/api/tools/tap2pcap.py
+++ b/api/tools/tap2pcap.py
@@ -8,7 +8,7 @@
Usage:
-bazel run @envoy_api_canonical//tools:tap2pcap
+bazel run @envoy_api//tools:tap2pcap
Known issues:
- IPv6 PCAP generation has malformed TCP packets. This appears to be a text2pcap
diff --git a/api/tools/tap2pcap_test.py b/api/tools/tap2pcap_test.py
index fd13cf32ff694..c0151846f5e18 100644
--- a/api/tools/tap2pcap_test.py
+++ b/api/tools/tap2pcap_test.py
@@ -11,7 +11,7 @@
# a golden output file for the tshark dump. Since we run tap2pcap in a
# subshell with a limited environment, the inferred time zone should be UTC.
if __name__ == '__main__':
- srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api_canonical')
+ srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api')
tap_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.pb_text')
expected_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.txt')
pcap_path = os.path.join(os.getenv('TEST_TMPDIR'), 'generated.pcap')
diff --git a/api/versioning/BUILD b/api/versioning/BUILD
index 61af4c4764680..8febd11d209e2 100644
--- a/api/versioning/BUILD
+++ b/api/versioning/BUILD
@@ -16,10 +16,14 @@ proto_library(
"//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg",
"//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg",
+ "//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg",
+ "//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg",
+ "//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg",
"//envoy/admin/v3:pkg",
"//envoy/config/accesslog/v3:pkg",
"//envoy/config/bootstrap/v3:pkg",
"//envoy/config/cluster/v3:pkg",
+ "//envoy/config/common/key_value/v3:pkg",
"//envoy/config/common/matcher/v3:pkg",
"//envoy/config/core/v3:pkg",
"//envoy/config/endpoint/v3:pkg",
@@ -45,15 +49,14 @@ proto_library(
"//envoy/data/tap/v3:pkg",
"//envoy/extensions/access_loggers/file/v3:pkg",
"//envoy/extensions/access_loggers/grpc/v3:pkg",
- "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg",
+ "//envoy/extensions/access_loggers/open_telemetry/v3:pkg",
"//envoy/extensions/access_loggers/stream/v3:pkg",
"//envoy/extensions/access_loggers/wasm/v3:pkg",
- "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg",
+ "//envoy/extensions/cache/simple_http_cache/v3:pkg",
"//envoy/extensions/clusters/aggregate/v3:pkg",
"//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg",
"//envoy/extensions/clusters/redis/v3:pkg",
"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg",
- "//envoy/extensions/common/key_value/v3:pkg",
"//envoy/extensions/common/matching/v3:pkg",
"//envoy/extensions/common/ratelimit/v3:pkg",
"//envoy/extensions/common/tap/v3:pkg",
@@ -65,14 +68,14 @@ proto_library(
"//envoy/extensions/filters/common/fault/v3:pkg",
"//envoy/extensions/filters/common/matcher/action/v3:pkg",
"//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg",
- "//envoy/extensions/filters/http/admission_control/v3alpha:pkg",
+ "//envoy/extensions/filters/http/admission_control/v3:pkg",
"//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg",
"//envoy/extensions/filters/http/aws_lambda/v3:pkg",
"//envoy/extensions/filters/http/aws_request_signing/v3:pkg",
- "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg",
+ "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg",
"//envoy/extensions/filters/http/buffer/v3:pkg",
- "//envoy/extensions/filters/http/cache/v3alpha:pkg",
- "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg",
+ "//envoy/extensions/filters/http/cache/v3:pkg",
+ "//envoy/extensions/filters/http/cdn_loop/v3:pkg",
"//envoy/extensions/filters/http/composite/v3:pkg",
"//envoy/extensions/filters/http/compressor/v3:pkg",
"//envoy/extensions/filters/http/cors/v3:pkg",
@@ -81,7 +84,7 @@ proto_library(
"//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg",
"//envoy/extensions/filters/http/dynamo/v3:pkg",
"//envoy/extensions/filters/http/ext_authz/v3:pkg",
- "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg",
+ "//envoy/extensions/filters/http/ext_proc/v3:pkg",
"//envoy/extensions/filters/http/fault/v3:pkg",
"//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg",
"//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg",
@@ -96,7 +99,7 @@ proto_library(
"//envoy/extensions/filters/http/kill_request/v3:pkg",
"//envoy/extensions/filters/http/local_ratelimit/v3:pkg",
"//envoy/extensions/filters/http/lua/v3:pkg",
- "//envoy/extensions/filters/http/oauth2/v3alpha:pkg",
+ "//envoy/extensions/filters/http/oauth2/v3:pkg",
"//envoy/extensions/filters/http/on_demand/v3:pkg",
"//envoy/extensions/filters/http/original_src/v3:pkg",
"//envoy/extensions/filters/http/ratelimit/v3:pkg",
@@ -124,14 +127,14 @@ proto_library(
"//envoy/extensions/filters/network/rbac/v3:pkg",
"//envoy/extensions/filters/network/redis_proxy/v3:pkg",
"//envoy/extensions/filters/network/sni_cluster/v3:pkg",
- "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg",
+ "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg",
"//envoy/extensions/filters/network/tcp_proxy/v3:pkg",
"//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg",
"//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg",
"//envoy/extensions/filters/network/thrift_proxy/v3:pkg",
"//envoy/extensions/filters/network/wasm/v3:pkg",
"//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg",
- "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg",
+ "//envoy/extensions/filters/udp/dns_filter/v3:pkg",
"//envoy/extensions/filters/udp/udp_proxy/v3:pkg",
"//envoy/extensions/formatter/metadata/v3:pkg",
"//envoy/extensions/formatter/req_without_query/v3:pkg",
@@ -150,6 +153,7 @@ proto_library(
"//envoy/extensions/quic/crypto_stream/v3:pkg",
"//envoy/extensions/quic/proof_source/v3:pkg",
"//envoy/extensions/rate_limit_descriptors/expr/v3:pkg",
+ "//envoy/extensions/rbac/matchers/upstream_ip_port/v3:pkg",
"//envoy/extensions/request_id/uuid/v3:pkg",
"//envoy/extensions/resource_monitors/fixed_heap/v3:pkg",
"//envoy/extensions/resource_monitors/injected_resource/v3:pkg",
@@ -163,7 +167,7 @@ proto_library(
"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg",
"//envoy/extensions/transport_sockets/quic/v3:pkg",
"//envoy/extensions/transport_sockets/raw_buffer/v3:pkg",
- "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg",
+ "//envoy/extensions/transport_sockets/s2a/v3:pkg",
"//envoy/extensions/transport_sockets/starttls/v3:pkg",
"//envoy/extensions/transport_sockets/tap/v3:pkg",
"//envoy/extensions/transport_sockets/tls/v3:pkg",
@@ -173,14 +177,14 @@ proto_library(
"//envoy/extensions/upstreams/http/v3:pkg",
"//envoy/extensions/upstreams/tcp/generic/v3:pkg",
"//envoy/extensions/wasm/v3:pkg",
- "//envoy/extensions/watchdog/profile_action/v3alpha:pkg",
+ "//envoy/extensions/watchdog/profile_action/v3:pkg",
"//envoy/service/accesslog/v3:pkg",
"//envoy/service/auth/v3:pkg",
"//envoy/service/cluster/v3:pkg",
"//envoy/service/discovery/v3:pkg",
"//envoy/service/endpoint/v3:pkg",
"//envoy/service/event_reporting/v3:pkg",
- "//envoy/service/ext_proc/v3alpha:pkg",
+ "//envoy/service/ext_proc/v3:pkg",
"//envoy/service/extension/v3:pkg",
"//envoy/service/health/v3:pkg",
"//envoy/service/listener/v3:pkg",
@@ -198,7 +202,7 @@ proto_library(
"//envoy/type/metadata/v3:pkg",
"//envoy/type/tracing/v3:pkg",
"//envoy/type/v3:pkg",
- "//envoy/watchdog/v3alpha:pkg",
+ "//envoy/watchdog/v3:pkg",
],
)
diff --git a/bazel/BUILD b/bazel/BUILD
index 303ab531bead3..3b22ffc8ff878 100644
--- a/bazel/BUILD
+++ b/bazel/BUILD
@@ -1,8 +1,10 @@
load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library")
load("//bazel:envoy_build_system.bzl", "envoy_package")
load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp")
+load("//bazel:utils.bzl", "json_data")
load("@bazel_skylib//lib:selects.bzl", "selects")
load("@bazel_skylib//rules:common_settings.bzl", "bool_flag")
+load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
licenses(["notice"]) # Apache 2
@@ -591,3 +593,8 @@ alias(
name = "windows",
actual = "@bazel_tools//src/conditions:windows",
)
+
+json_data(
+ name = "repository_locations",
+ data = REPOSITORY_LOCATIONS_SPEC,
+)
diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md
index 34fc92b21f123..9820ff4cf993d 100644
--- a/bazel/EXTERNAL_DEPS.md
+++ b/bazel/EXTERNAL_DEPS.md
@@ -88,7 +88,7 @@ The name of the dependency can be found in
[the repository locations file.](https://github.com/envoyproxy/envoy/blob/main/bazel/repository_locations.bzl)
The path of the local copy has to be absolute path.
-For repositories built by `envoy_cmake_external()` in `bazel/foreign_cc/BUILD`,
+For repositories built by `envoy_cmake()` in `bazel/foreign_cc/BUILD`,
it is necessary to populate the local copy with some additional Bazel machinery
to support `--override_repository`:
1. Place an empty `WORKSPACE` in the root.
diff --git a/bazel/README.md b/bazel/README.md
index 3828e675a0b37..9337efb33ca81 100644
--- a/bazel/README.md
+++ b/bazel/README.md
@@ -20,7 +20,7 @@ On Windows, run the following commands:
```cmd
mkdir %USERPROFILE%\bazel
powershell Invoke-WebRequest https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-windows-amd64.exe -OutFile %USERPROFILE%\bazel\bazel.exe
-set PATH=%PATH%;%USERPROFILE%\bazel
+set PATH=%USERPROFILE%\bazel;%PATH%
```
## Production environments
@@ -30,7 +30,7 @@ dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#require
independently sourced, the following steps should be followed:
1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements).
-1. `bazel build -c opt //source/exe:envoy-static` from the repository root.
+1. `bazel build -c opt envoy` from the repository root.
## Quick start Bazel build for developers
@@ -154,8 +154,8 @@ for how to update or override dependencies.
package.
```cmd
mklink %USERPROFILE%\Python39\python3.exe %USERPROFILE%\Python39\python.exe
- set PATH=%PATH%;%USERPROFILE%\Python39
- set PATH=%PATH%;%USERPROFILE%\Python39\Scripts
+ set PATH=%USERPROFILE%\Python39;%PATH%
+ set PATH=%USERPROFILE%\Python39\Scripts;%PATH%
pip install wheel
```
@@ -169,7 +169,7 @@ for how to update or override dependencies.
which is determined by their relative ordering in your PATH.
```cmd
set BAZEL_VC=%USERPROFILE%\VSBT2019\VC
- set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64
+ set PATH=%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64;%PATH%
```
The Windows SDK contains header files and libraries you need when building Windows applications. Bazel always uses the latest, but you can specify a different version by setting the environment variable `BAZEL_WINSDK_FULL_VERSION`. See [bazel/windows](https://docs.bazel.build/versions/master/windows.html)
@@ -179,8 +179,8 @@ for how to update or override dependencies.
the project's GCP CI remote build environment, so 64 bit builds from the CMake and ninja
projects are used instead.
```cmd
- set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin
- set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja
+ set PATH=%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin;%PATH%
+ set PATH=%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja;%PATH%
```
[MSYS2 shell](https://msys2.github.io/): Install to a path with no spaces, e.g. C:\msys64.
@@ -189,7 +189,7 @@ for how to update or override dependencies.
executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment variable to a value
of `*` is often advisable to ensure argument parsing in the MSYS2 shell behaves as expected.
```cmd
- set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin
+ set PATH=%USERPROFILE%\msys64\usr\bin;%PATH%
set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe
set MSYS2_ARG_CONV_EXCL=*
set MSYS2_PATH_TYPE=inherit
@@ -216,7 +216,7 @@ for how to update or override dependencies.
[Git](https://git-scm.com/downloads): This version from the Git project, or the version
distributed using pacman under MSYS2 will both work, ensure one is on the PATH:.
```cmd
- set PATH=%PATH%;%USERPROFILE%\Git\bin
+ set PATH=%USERPROFILE%\Git\bin;%PATH%
```
Lastly, persist environment variable changes.
@@ -236,7 +236,7 @@ for how to update or override dependencies.
in your shell for buildifier to work.
1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer`
in your shell for buildozer to work.
-1. `bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or
+1. `bazel build envoy` from the Envoy source directory. Add `-c opt` for an optimized release build or
`-c dbg` for an unoptimized, fully instrumented debugging build.
## Building Envoy with the CI Docker image
@@ -270,7 +270,7 @@ To build Envoy with a remote build services, run Bazel with your remote build se
For example the following command runs build with the GCP RBE service used in CI:
```
-bazel build //source/exe:envoy-static --config=remote-clang \
+bazel build envoy --config=remote-clang \
--remote_cache=grpcs://remotebuildexecution.googleapis.com \
--remote_executor=grpcs://remotebuildexecution.googleapis.com \
--remote_instance_name=projects/envoy-ci/instances/default_instance
@@ -289,7 +289,7 @@ Building Envoy with Docker sandbox uses the same Docker image used in CI with fi
output which is not depending on your local C++ toolchain. It can also help debugging issues with RBE. To build Envoy with Docker sandbox:
```
-bazel build //source/exe:envoy-static --config=docker-clang
+bazel build envoy --config=docker-clang
```
Tests can be run in docker sandbox too. Note that the network environment, such as IPv6, may be different in the docker sandbox so you may want
@@ -299,7 +299,7 @@ set different options. See below to configure test IP versions.
To link Envoy against libc++, follow the [quick start](#quick-start-bazel-build-for-developers) to setup Clang+LLVM and run:
```
-bazel build --config=libc++ //source/exe:envoy-static
+bazel build --config=libc++ envoy
```
Or use our configuration with Remote Execution or Docker sandbox, pass `--config=remote-clang-libc++` or
@@ -522,14 +522,14 @@ that Bazel supports:
You can use the `-c ` flag to control this, e.g.
```
-bazel build -c opt //source/exe:envoy-static
+bazel build -c opt envoy
```
To override the compilation mode and optimize the build for binary size, you can
use the `sizeopt` configuration:
```
-bazel build //source/exe:envoy-static --config=sizeopt
+bazel build envoy --config=sizeopt
```
## Sanitizers
@@ -751,7 +751,7 @@ They should also ignore any local `.bazelrc` for reproducibility. This can be
achieved with:
```
-bazel --bazelrc=/dev/null build -c opt //source/exe:envoy-static.stripped
+bazel --bazelrc=/dev/null build -c opt envoy.stripped
```
One caveat to note is that the Git SHA1 is truncated to 16 bytes today as a
@@ -818,7 +818,7 @@ resources, you can override Bazel's default job parallelism determination with
`--jobs=N` to restrict the build to at most `N` simultaneous jobs, e.g.:
```
-bazel build --jobs=2 //source/exe:envoy-static
+bazel build --jobs=2 envoy
```
# Debugging the Bazel build
@@ -827,19 +827,19 @@ When trying to understand what Bazel is doing, the `-s` and `--explain` options
are useful. To have Bazel provide verbose output on which commands it is executing:
```
-bazel build -s //source/exe:envoy-static
+bazel build -s envoy
```
To have Bazel emit to a text file the rationale for rebuilding a target:
```
-bazel build --explain=file.txt //source/exe:envoy-static
+bazel build --explain=file.txt envoy
```
To get more verbose explanations:
```
-bazel build --explain=file.txt --verbose_explanations //source/exe:envoy-static
+bazel build --explain=file.txt --verbose_explanations envoy
```
# Resolving paths in bazel build output
diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl
index 362e1803a1ef8..97718ef5346b9 100644
--- a/bazel/api_binding.bzl
+++ b/bazel/api_binding.bzl
@@ -13,6 +13,7 @@ def _default_envoy_api_impl(ctx):
]
for d in api_dirs:
ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d)
+ ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child("bazel").get_child("utils.bzl"), "utils.bzl")
_default_envoy_api = repository_rule(
implementation = _default_envoy_api_impl,
@@ -24,14 +25,9 @@ _default_envoy_api = repository_rule(
def envoy_api_binding():
# Treat the data plane API as an external repo, this simplifies exporting
- # the API to https://github.com/envoyproxy/data-plane-api. This is the
- # shadow API for Envoy internal use, see #9479.
+ # the API to https://github.com/envoyproxy/data-plane-api.
if "envoy_api" not in native.existing_rules().keys():
- _default_envoy_api(name = "envoy_api", reldir = "generated_api_shadow")
-
- # We also provide the non-shadowed API for developer use (see #9479).
- if "envoy_api_raw" not in native.existing_rules().keys():
- _default_envoy_api(name = "envoy_api_canonical", reldir = "api")
+ _default_envoy_api(name = "envoy_api", reldir = "api")
# TODO(https://github.com/envoyproxy/envoy/issues/7719) need to remove both bindings and use canonical rules
native.bind(
diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl
index 7c806b08c98a6..b382e3fd02cd9 100644
--- a/bazel/dependency_imports.bzl
+++ b/bazel/dependency_imports.bzl
@@ -1,6 +1,5 @@
-load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
+load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies")
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
-load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config")
load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties")
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies")
@@ -9,15 +8,16 @@ load("@upb//bazel:workspace_deps.bzl", "upb_deps")
load("@rules_rust//rust:repositories.bzl", "rust_repositories")
load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies")
load("@proxy_wasm_rust_sdk//bazel:dependencies.bzl", "proxy_wasm_rust_sdk_dependencies")
+load("@rules_cc//cc:repositories.bzl", "rules_cc_dependencies", "rules_cc_toolchains")
# go version for rules_go
GO_VERSION = "1.15.5"
def envoy_dependency_imports(go_version = GO_VERSION):
- rules_foreign_cc_dependencies()
+ # TODO: allow building of tools for easier onboarding
+ rules_foreign_cc_dependencies(register_default_tools = False, register_built_tools = False)
go_rules_dependencies()
go_register_toolchains(go_version)
- rbe_toolchains_config()
gazelle_dependencies()
apple_rules_dependencies()
rust_repositories()
@@ -28,6 +28,8 @@ def envoy_dependency_imports(go_version = GO_VERSION):
oss_fuzz = True,
honggfuzz = False,
)
+ rules_cc_dependencies()
+ rules_cc_toolchains()
custom_exec_properties(
name = "envoy_large_machine_exec_property",
diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl
index 4d671ab9562fa..f48ebe70564e9 100644
--- a/bazel/envoy_build_system.bzl
+++ b/bazel/envoy_build_system.bzl
@@ -1,6 +1,6 @@
# The main Envoy bazel file. Load this file for all Envoy-specific build macros
# and rules that you'd like to use in your BUILD files.
-load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external")
+load("@rules_foreign_cc//foreign_cc:cmake.bzl", "cmake")
load(":envoy_binary.bzl", _envoy_cc_binary = "envoy_cc_binary")
load(":envoy_internal.bzl", "envoy_external_dep_path")
load(
@@ -44,6 +44,7 @@ load(
)
load(
"@envoy_build_config//:extensions_build_config.bzl",
+ "CONTRIB_EXTENSION_PACKAGE_VISIBILITY",
"EXTENSION_PACKAGE_VISIBILITY",
)
load("@bazel_skylib//rules:common_settings.bzl", "bool_flag")
@@ -65,7 +66,7 @@ def envoy_extension_package(enabled_default = True, default_visibility = EXTENSI
)
def envoy_contrib_package():
- envoy_extension_package(default_visibility = ["//:contrib_library"])
+ envoy_extension_package(default_visibility = CONTRIB_EXTENSION_PACKAGE_VISIBILITY)
# A genrule variant that can output a directory. This is useful when doing things like
# generating a fuzz corpus mechanically.
@@ -91,15 +92,12 @@ envoy_directory_genrule = rule(
# External CMake C++ library targets should be specified with this function. This defaults
# to building the dependencies with ninja
-def envoy_cmake_external(
+def envoy_cmake(
name,
cache_entries = {},
debug_cache_entries = {},
- cmake_options = ["-GNinja"],
- make_commands = ["ninja -v", "ninja -v install"],
lib_source = "",
postfix_script = "",
- static_libraries = [],
copy_pdb = False,
pdb_name = "",
cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles",
@@ -127,22 +125,23 @@ def envoy_cmake_external(
else:
pf = postfix_script
- cmake_external(
+ cmake(
name = name,
cache_entries = select({
"@envoy//bazel:dbg_build": cache_entries_debug,
"//conditions:default": cache_entries,
}),
- cmake_options = cmake_options,
+ generate_args = ["-GNinja"],
+ targets = ["", "install"],
+ # TODO: Remove install target and make this work
+ install = False,
# TODO(lizan): Make this always true
generate_crosstool_file = select({
"@envoy//bazel:windows_x86_64": True,
"//conditions:default": generate_crosstool_file,
}),
lib_source = lib_source,
- make_commands = make_commands,
postfix_script = pf,
- static_libraries = static_libraries,
**kwargs
)
diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl
index 6f9c9d83e30b3..9c5130f15e4b7 100644
--- a/bazel/envoy_internal.bzl
+++ b/bazel/envoy_internal.bzl
@@ -51,9 +51,9 @@ def envoy_copts(repository, test = False):
# debugging info detailing some 1600 test binaries would be wasteful.
# targets listed in order from generic to increasing specificity.
# Bazel adds an implicit -DNDEBUG for opt targets.
- repository + "//bazel:opt_build": [] if test else ["-ggdb3", "-gsplit-dwarf"],
+ repository + "//bazel:opt_build": [] if test else ["-ggdb3"],
repository + "//bazel:fastbuild_build": [],
- repository + "//bazel:dbg_build": ["-ggdb3", "-gsplit-dwarf"],
+ repository + "//bazel:dbg_build": ["-ggdb3"],
repository + "//bazel:windows_opt_build": [] if test else ["-Z7"],
repository + "//bazel:windows_fastbuild_build": [],
repository + "//bazel:windows_dbg_build": [],
diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl
index ac74d1be29c96..5b1d674483c8a 100644
--- a/bazel/envoy_library.bzl
+++ b/bazel/envoy_library.bzl
@@ -12,6 +12,7 @@ load(":envoy_pch.bzl", "envoy_pch_copts")
load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library")
load(
"@envoy_build_config//:extensions_build_config.bzl",
+ "CONTRIB_EXTENSION_PACKAGE_VISIBILITY",
"EXTENSION_CONFIG_VISIBILITY",
)
@@ -75,7 +76,7 @@ def envoy_cc_contrib_extension(
name,
tags = [],
extra_visibility = [],
- visibility = ["//:contrib_library"],
+ visibility = CONTRIB_EXTENSION_PACKAGE_VISIBILITY,
**kwargs):
envoy_cc_extension(name, tags, extra_visibility, visibility, **kwargs)
diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl
index 799e60154afc5..0cd48ba286200 100644
--- a/bazel/envoy_test.bzl
+++ b/bazel/envoy_test.bzl
@@ -169,10 +169,12 @@ def envoy_cc_test(
linkstatic = envoy_linkstatic(),
malloc = tcmalloc_external_dep(repository),
deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [
- repository + "//test:test_pch",
repository + "//test:main",
repository + "//test/test_common:test_version_linkstamp",
- ],
+ ] + select({
+ repository + "//bazel:clang_pch_build": [repository + "//test:test_pch"],
+ "//conditions:default": [],
+ }),
# from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51
# 2 - by default, mocks act as StrictMocks.
args = args + ["--gmock_default_mock_behavior=2"],
diff --git a/bazel/external/envoy_build_tools.patch b/bazel/external/envoy_build_tools.patch
new file mode 100644
index 0000000000000..33d5362b45f66
--- /dev/null
+++ b/bazel/external/envoy_build_tools.patch
@@ -0,0 +1,39 @@
+diff --git a/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl b/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl
+index 5dbaa86..3c90e3b 100755
+--- a/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl
++++ b/toolchains/configs/linux/clang/cc/cc_toolchain_config.bzl
+@@ -386,7 +386,7 @@ def _impl(ctx):
+ ],
+ flag_groups = [
+ flag_group(
+- flags = ["-gsplit-dwarf"],
++ flags = ["-gsplit-dwarf", "-g"],
+ expand_if_available = "per_object_debug_info_file",
+ ),
+ ],
+diff --git a/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl b/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl
+index 5dbaa86..3c90e3b 100755
+--- a/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl
++++ b/toolchains/configs/linux/clang_libcxx/cc/cc_toolchain_config.bzl
+@@ -386,7 +386,7 @@ def _impl(ctx):
+ ],
+ flag_groups = [
+ flag_group(
+- flags = ["-gsplit-dwarf"],
++ flags = ["-gsplit-dwarf", "-g"],
+ expand_if_available = "per_object_debug_info_file",
+ ),
+ ],
+diff --git a/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl b/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl
+index 5dbaa86..3c90e3b 100755
+--- a/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl
++++ b/toolchains/configs/linux/gcc/cc/cc_toolchain_config.bzl
+@@ -386,7 +386,7 @@ def _impl(ctx):
+ ],
+ flag_groups = [
+ flag_group(
+- flags = ["-gsplit-dwarf"],
++ flags = ["-gsplit-dwarf", "-g"],
+ expand_if_available = "per_object_debug_info_file",
+ ),
+ ],
diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch
index e124821f9acee..8c27b8f327ceb 100644
--- a/bazel/external/googleurl.patch
+++ b/bazel/external/googleurl.patch
@@ -2,22 +2,22 @@
# project using clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974.
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
-index 0cd36dc..8c4cbd4 100644
+index 6651220..a469c19 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -7,10 +7,6 @@
-
+
#include "build/build_config.h"
-
+
-#if defined(COMPILER_MSVC) && !defined(__clang__)
-#error "Only clang-cl is supported on Windows, see https://crbug.com/988071"
-#endif
-
- // Annotate a variable indicating it's ok if the variable is not used.
- // (Typically used to silence a compiler warning when the assignment
- // is important for some other reason.)
-@@ -55,8 +51,12 @@
- // prevent code folding, see gurl_base::debug::Alias.
+ // This is a wrapper around `__has_cpp_attribute`, which can be used to test for
+ // the presence of an attribute. In case the compiler does not support this
+ // macro it will simply evaluate to 0.
+@@ -75,8 +71,12 @@
+ // prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
// Use like:
// void NOT_TAIL_CALLED FooBar();
-#if defined(__clang__) && __has_attribute(not_tail_called)
@@ -30,10 +30,10 @@ index 0cd36dc..8c4cbd4 100644
#else
#define NOT_TAIL_CALLED
#endif
-@@ -226,7 +226,9 @@
+@@ -273,7 +273,9 @@
#endif
#endif
-
+
-#if defined(__clang__) && __has_attribute(uninitialized)
+#if defined(__clang__)
+#if defined(__has_attribute)
@@ -41,7 +41,7 @@ index 0cd36dc..8c4cbd4 100644
// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
// the specified variable.
// Library-wide alternative is
-@@ -257,6 +259,8 @@
+@@ -304,6 +306,8 @@
// E.g. platform, bot, benchmark or test name in patch description or next to
// the attribute.
#define STACK_UNINITIALIZED __attribute__((uninitialized))
@@ -50,13 +50,74 @@ index 0cd36dc..8c4cbd4 100644
#else
#define STACK_UNINITIALIZED
#endif
+@@ -365,8 +369,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+ #endif // defined(__clang_analyzer__)
+
+ // Use nomerge attribute to disable optimization of merging multiple same calls.
+-#if defined(__clang__) && __has_attribute(nomerge)
++#if defined(__clang__)
++#if defined(__has_attribute)
++#if __has_attribute(nomerge)
+ #define NOMERGE [[clang::nomerge]]
++#endif
++#endif
+ #else
+ #define NOMERGE
+ #endif
+@@ -392,8 +400,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+ // See also:
+ // https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+ // https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html
+-#if defined(__clang__) && __has_attribute(trivial_abi)
++#if defined(__clang__)
++#if defined(__has_attribute)
++#if __has_attribute(trivial_abi)
+ #define TRIVIAL_ABI [[clang::trivial_abi]]
++#endif
++#endif
+ #else
+ #define TRIVIAL_ABI
+ #endif
+@@ -401,8 +413,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+ // Marks a member function as reinitializing a moved-from variable.
+ // See also
+ // https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
+-#if defined(__clang__) && __has_attribute(reinitializes)
++#if defined(__clang__)
++#if defined(__has_attribute)
++#if __has_attribute(reinitializes)
+ #define REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
++#endif
++#endif
+ #else
+ #define REINITIALIZES_AFTER_MOVE
+ #endif
+
+# TODO(keith): Remove once bazel supports newer NDK versions https://github.com/bazelbuild/bazel/issues/12889
+
+diff --git a/base/containers/checked_iterators.h b/base/containers/checked_iterators.h
+index b5fe925..31aa81e 100644
+--- a/base/containers/checked_iterators.h
++++ b/base/containers/checked_iterators.h
+@@ -237,9 +237,11 @@ using CheckedContiguousConstIterator = CheckedContiguousIterator;
+ // [3] https://wg21.link/pointer.traits.optmem
+ namespace std {
+
++#ifdef SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR
+ template
+ struct __is_cpp17_contiguous_iterator<::gurl_base::CheckedContiguousIterator>
+ : true_type {};
++#endif
+
+ template
+ struct pointer_traits<::gurl_base::CheckedContiguousIterator> {
# TODO(dio): Consider to remove the following patch when we have IDN-free optional build for URL
# library from the upstream Chromium project. This is tracked in:
# https://github.com/envoyproxy/envoy/issues/14743.
diff --git a/url/BUILD b/url/BUILD
-index f2ec8da..4e2d55b 100644
+index f2ec8da..df69661 100644
--- a/url/BUILD
+++ b/url/BUILD
@@ -52,3 +52,27 @@ cc_library(
diff --git a/bazel/external/kafka_int32.patch b/bazel/external/kafka_int32.patch
deleted file mode 100644
index 8b88fe3358211..0000000000000
--- a/bazel/external/kafka_int32.patch
+++ /dev/null
@@ -1,27 +0,0 @@
---- DescribeGroupsResponse.json 2020-03-25 16:12:16.373302600 -0400
-+++ DescribeGroupsResponse.json 2020-03-25 16:11:16.184156200 -0400
-@@ -63,7 +63,7 @@
- { "name": "MemberAssignment", "type": "bytes", "versions": "0+",
- "about": "The current assignment provided by the group leader." }
- ]},
-- { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "-2147483648",
-+ { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "INT32_MIN",
- "about": "32-bit bitfield to represent authorized operations for this group." }
- ]}
- ]
-
---- MetadataResponse.json 2020-03-25 15:53:36.319161000 -0400
-+++ MetadataResponse.json 2020-03-25 15:54:11.510400000 -0400
-@@ -81,10 +81,10 @@
- { "name": "OfflineReplicas", "type": "[]int32", "versions": "5+", "ignorable": true,
- "about": "The set of offline replicas of this partition." }
- ]},
-- { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648",
-+ { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN",
- "about": "32-bit bitfield to represent authorized operations for this topic." }
- ]},
-- { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648",
-+ { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN",
- "about": "32-bit bitfield to represent authorized operations for this cluster." }
- ]
- }
diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD
index f8a1079ac93cd..8866c95b53eea 100644
--- a/bazel/external/quiche.BUILD
+++ b/bazel/external/quiche.BUILD
@@ -1266,6 +1266,7 @@ envoy_cc_library(
visibility = ["//visibility:public"],
deps = [
":quic_core_arena_scoped_ptr_lib",
+ ":quic_core_connection_context_lib",
":quic_core_time_lib",
],
)
@@ -2288,6 +2289,21 @@ envoy_cc_library(
deps = [":quic_core_types_lib"],
)
+envoy_cc_library(
+ name = "quic_core_http_capsule_lib",
+ srcs = ["quiche/quic/core/http/capsule.cc"],
+ hdrs = ["quiche/quic/core/http/capsule.h"],
+ copts = quiche_copts,
+ repository = "@envoy",
+ deps = [
+ ":quic_core_buffer_allocator_lib",
+ ":quic_core_data_lib",
+ ":quic_core_http_http_frames_lib",
+ ":quic_core_types_lib",
+ ":quic_platform_base",
+ ],
+)
+
envoy_cc_library(
name = "quic_core_http_client_lib",
srcs = [
@@ -2386,6 +2402,7 @@ envoy_cc_library(
repository = "@envoy",
tags = ["nofips"],
deps = [
+ ":quic_core_http_http_constants_lib",
":quic_core_types_lib",
":quic_platform_base",
":spdy_core_framer_lib",
@@ -2429,6 +2446,7 @@ envoy_cc_library(
"quiche/quic/core/http/quic_spdy_session.cc",
"quiche/quic/core/http/quic_spdy_stream.cc",
"quiche/quic/core/http/web_transport_http3.cc",
+ "quiche/quic/core/http/web_transport_stream_adapter.cc",
],
hdrs = [
"quiche/quic/core/http/quic_headers_stream.h",
@@ -2439,6 +2457,7 @@ envoy_cc_library(
"quiche/quic/core/http/quic_spdy_session.h",
"quiche/quic/core/http/quic_spdy_stream.h",
"quiche/quic/core/http/web_transport_http3.h",
+ "quiche/quic/core/http/web_transport_stream_adapter.h",
],
copts = quiche_copts,
repository = "@envoy",
@@ -2448,6 +2467,7 @@ envoy_cc_library(
":quic_core_connection_lib",
":quic_core_crypto_crypto_handshake_lib",
":quic_core_error_codes_lib",
+ ":quic_core_http_capsule_lib",
":quic_core_http_header_list_lib",
":quic_core_http_http_constants_lib",
":quic_core_http_http_decoder_lib",
@@ -2466,7 +2486,6 @@ envoy_cc_library(
":quic_core_utils_lib",
":quic_core_versions_lib",
":quic_core_web_transport_interface_lib",
- ":quic_core_web_transport_stream_adapter",
":quic_platform_base",
":quic_platform_mem_slice_storage",
":spdy_core_framer_lib",
@@ -3149,19 +3168,6 @@ envoy_cc_library(
],
)
-envoy_cc_library(
- name = "quic_core_web_transport_stream_adapter",
- srcs = ["quiche/quic/core/web_transport_stream_adapter.cc"],
- hdrs = ["quiche/quic/core/web_transport_stream_adapter.h"],
- copts = quiche_copts,
- repository = "@envoy",
- tags = ["nofips"],
- deps = [
- ":quic_core_session_lib",
- ":quic_core_web_transport_interface_lib",
- ],
-)
-
envoy_cc_library(
name = "quic_core_server_id_lib",
srcs = ["quiche/quic/core/quic_server_id.cc"],
diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD
index 2c9b481282cb0..e55355fe8f534 100644
--- a/bazel/foreign_cc/BUILD
+++ b/bazel/foreign_cc/BUILD
@@ -1,6 +1,6 @@
load("@rules_cc//cc:defs.bzl", "cc_library")
-load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package")
-load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make")
+load("//bazel:envoy_build_system.bzl", "envoy_cmake", "envoy_package")
+load("@rules_foreign_cc//foreign_cc:configure.bzl", "configure_make")
licenses(["notice"]) # Apache 2
@@ -20,12 +20,14 @@ configure_make(
}),
lib_source = "@com_github_gperftools_gperftools//:all",
linkopts = ["-lpthread"],
- make_commands = ["make install-libLTLIBRARIES install-perftoolsincludeHEADERS"],
- static_libraries = select({
+ out_static_libs = select({
"//bazel:debug_tcmalloc": ["libtcmalloc_debug.a"],
"//conditions:default": ["libtcmalloc_and_profiler.a"],
}),
tags = ["skip_on_windows"],
+ targets = [
+ "install-libLTLIBRARIES install-perftoolsincludeHEADERS",
+ ],
)
# Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/227
@@ -44,14 +46,14 @@ configure_make(
configure_in_place = True,
configure_options = ["--disable-ssl --disable-gssapi --disable-lz4-ext --disable-zstd && cp Makefile.config src/.. && cp config.h src/.."],
lib_source = "@edenhill_librdkafka//:all",
- make_commands = [
- "make ARFLAGS='' libs install-subdirs",
- ],
- static_libraries = [
+ out_static_libs = [
"librdkafka.a",
"librdkafka++.a",
],
tags = ["skip_on_windows"],
+ targets = [
+ "ARFLAGS='' libs install-subdirs",
+ ],
alwayslink = True,
)
@@ -66,7 +68,7 @@ cc_library(
configure_make(
name = "luajit",
configure_command = "build.py",
- configure_env_vars = select({
+ env = select({
# This shouldn't be needed! See
# https://github.com/envoyproxy/envoy/issues/6084
# TODO(htuch): Remove when #6084 is fixed
@@ -76,18 +78,18 @@ configure_make(
"//conditions:default": {},
}),
lib_source = "@com_github_luajit_luajit//:all",
- make_commands = [],
out_include_dir = "include/luajit-2.1",
- static_libraries = select({
+ out_static_libs = select({
"//bazel:windows_x86_64": ["lua51.lib"],
"//conditions:default": ["libluajit-5.1.a"],
}),
+ targets = [],
)
configure_make(
name = "moonjit",
configure_command = "build.py",
- configure_env_vars = select({
+ env = select({
# This shouldn't be needed! See
# https://github.com/envoyproxy/envoy/issues/6084
# TODO(htuch): Remove when #6084 is fixed
@@ -96,13 +98,12 @@ configure_make(
"//conditions:default": {},
}),
lib_source = "@com_github_moonjit_moonjit//:all",
- make_commands = [],
out_include_dir = "include/moonjit-2.2",
- static_libraries = ["libluajit-5.1.a"],
+ out_static_libs = ["libluajit-5.1.a"],
tags = ["skip_on_windows"],
)
-envoy_cmake_external(
+envoy_cmake(
name = "libsxg",
cache_entries = {
"CMAKE_BUILD_TYPE": "Release",
@@ -112,15 +113,14 @@ envoy_cmake_external(
"SXG_WITH_CERT_CHAIN": "off",
"RUN_TEST": "off",
"CMAKE_INSTALL_LIBDIR": "lib",
- "CMAKE_TRY_COMPILE_TARGET_TYPE": "STATIC_LIBRARY",
},
lib_source = "@com_github_google_libsxg//:all",
- static_libraries = ["libsxg.a"],
+ out_static_libs = ["libsxg.a"],
tags = ["skip_on_windows"],
deps = ["@boringssl//:ssl"],
)
-envoy_cmake_external(
+envoy_cmake(
name = "ares",
cache_entries = {
"CARES_BUILD_TOOLS": "no",
@@ -135,17 +135,17 @@ envoy_cmake_external(
"//bazel:apple": ["-lresolv"],
"//conditions:default": [],
}),
- postfix_script = select({
- "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h",
- "//conditions:default": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h",
- }),
- static_libraries = select({
+ out_static_libs = select({
"//bazel:windows_x86_64": ["cares.lib"],
"//conditions:default": ["libcares.a"],
}),
+ postfix_script = select({
+ "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h",
+ "//conditions:default": "rm -f $INSTALLDIR/include/ares_dns.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h",
+ }),
)
-envoy_cmake_external(
+envoy_cmake(
name = "curl",
cache_entries = {
"BUILD_CURL_EXE": "off",
@@ -186,7 +186,7 @@ envoy_cmake_external(
defines = ["CURL_STATICLIB"],
generate_crosstool_file = True,
lib_source = "@com_github_curl//:all",
- static_libraries = select({
+ out_static_libs = select({
"//bazel:windows_x86_64": ["libcurl.lib"],
"//conditions:default": ["libcurl.a"],
}),
@@ -198,7 +198,7 @@ envoy_cmake_external(
],
)
-envoy_cmake_external(
+envoy_cmake(
name = "event",
cache_entries = {
"EVENT__DISABLE_OPENSSL": "on",
@@ -215,7 +215,7 @@ envoy_cmake_external(
"_GNU_SOURCE": "on",
},
lib_source = "@com_github_libevent_libevent//:all",
- static_libraries = select({
+ out_static_libs = select({
# macOS organization of libevent is different from Windows/Linux.
# Including libevent_core is a requirement on those platforms, but
# results in duplicate symbols when built on macOS.
@@ -236,7 +236,7 @@ envoy_cmake_external(
}),
)
-envoy_cmake_external(
+envoy_cmake(
name = "llvm",
cache_entries = {
# Disable both: BUILD and INCLUDE, since some of the INCLUDE
@@ -267,7 +267,7 @@ envoy_cmake_external(
# using -l:libstdc++.a.
"CMAKE_CXX_FLAGS": "-lstdc++",
},
- env_vars = {
+ env = {
# Workaround for the -DDEBUG flag added in fastbuild on macOS,
# which conflicts with DEBUG macro used in LLVM.
"CFLAGS": "-UDEBUG",
@@ -275,7 +275,7 @@ envoy_cmake_external(
"ASMFLAGS": "-UDEBUG",
},
lib_source = "@org_llvm_llvm//:all",
- static_libraries = select({
+ out_static_libs = select({
"//conditions:default": [
# Order from llvm-config --libnames asmparser core debuginfodwarf
# engine lto mcparser mirparser orcjit passes runtimedyld
@@ -336,7 +336,7 @@ envoy_cmake_external(
alwayslink = True,
)
-envoy_cmake_external(
+envoy_cmake(
name = "nghttp2",
cache_entries = {
"ENABLE_LIB_ONLY": "on",
@@ -349,13 +349,13 @@ envoy_cmake_external(
debug_cache_entries = {"ENABLE_DEBUG": "on"},
defines = ["NGHTTP2_STATICLIB"],
lib_source = "@com_github_nghttp2_nghttp2//:all",
- static_libraries = select({
+ out_static_libs = select({
"//bazel:windows_x86_64": ["nghttp2.lib"],
"//conditions:default": ["libnghttp2.a"],
}),
)
-envoy_cmake_external(
+envoy_cmake(
name = "wamr",
cache_entries = {
"LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm",
@@ -368,14 +368,13 @@ envoy_cmake_external(
"WAMR_BUILD_TAIL_CALL": "1",
},
lib_source = "@com_github_wamr//:all",
- static_libraries = ["libvmlib.a"],
+ out_static_libs = ["libvmlib.a"],
tags = ["skip_on_windows"],
deps = [":llvm"],
)
-envoy_cmake_external(
+envoy_cmake(
name = "wavm",
- binaries = ["wavm"],
cache_entries = {
"LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm",
"WAVM_ENABLE_STATIC_LINKING": "on",
@@ -385,7 +384,7 @@ envoy_cmake_external(
# using -l:libstdc++.a.
"CMAKE_CXX_FLAGS": "-lstdc++ -Wno-unused-command-line-argument",
},
- env_vars = {
+ env = {
# Workaround for the -DDEBUG flag added in fastbuild on macOS,
# which conflicts with DEBUG macro used in LLVM.
"CFLAGS": "-UDEBUG",
@@ -393,7 +392,8 @@ envoy_cmake_external(
"ASMFLAGS": "-UDEBUG",
},
lib_source = "@com_github_wavm_wavm//:all",
- static_libraries = select({
+ out_binaries = ["wavm"],
+ out_static_libs = select({
"//conditions:default": [
"libWAVM.a",
"libWAVMUnwind.a",
@@ -403,7 +403,7 @@ envoy_cmake_external(
deps = [":llvm"],
)
-envoy_cmake_external(
+envoy_cmake(
name = "zlib",
cache_entries = {
"CMAKE_CXX_COMPILER_FORCED": "on",
@@ -436,7 +436,7 @@ envoy_cmake_external(
"//bazel:zlib_ng": "@com_github_zlib_ng_zlib_ng//:all",
"//conditions:default": "@net_zlib//:all",
}),
- static_libraries = select({
+ out_static_libs = select({
"//bazel:windows_x86_64": ["zlib.lib"],
"//conditions:default": ["libz.a"],
}),
diff --git a/bazel/foreign_cc/zlib_ng.patch b/bazel/foreign_cc/zlib_ng.patch
index 77b04ef09496c..b4b73279da3f6 100644
--- a/bazel/foreign_cc/zlib_ng.patch
+++ b/bazel/foreign_cc/zlib_ng.patch
@@ -1,12 +1,13 @@
+
# Add support for compiling to WebAssembly using Emscripten.
# https://github.com/zlib-ng/zlib-ng/pull/794
diff --git a/cmake/detect-arch.c b/cmake/detect-arch.c
-index 5715535..2137691 100644
+
--- a/cmake/detect-arch.c
+++ b/cmake/detect-arch.c
-@@ -93,6 +93,10 @@
- #elif defined(__THW_RS6000)
- #error archfound rs6000
+@@ -101,6 +101,10 @@
+ #error archfound riscv32
+ #endif
+// Emscripten (WebAssembly)
+#elif defined(__EMSCRIPTEN__)
@@ -16,16 +17,16 @@ index 5715535..2137691 100644
#else
#error archfound unrecognized
diff --git a/cmake/detect-arch.cmake b/cmake/detect-arch.cmake
-index b80d666..c6cc214 100644
+
--- a/cmake/detect-arch.cmake
+++ b/cmake/detect-arch.cmake
-@@ -85,6 +85,9 @@ elseif("${ARCH}" MATCHES "parisc")
+@@ -85,6 +85,9 @@
elseif("${ARCH}" MATCHES "rs6000")
set(BASEARCH "rs6000")
set(BASEARCH_RS6000_FOUND TRUE)
+elseif("${ARCH}" MATCHES "wasm32")
+ set(BASEARCH "wasm32")
+ set(BASEARCH_WASM32_FOUND TRUE)
- else()
- set(BASEARCH "x86")
- set(BASEARCH_X86_FOUND TRUE)
+ elseif("${ARCH}" MATCHES "riscv(32|64)")
+ set(BASEARCH "riscv")
+ set(BASEARCH_RISCV_FOUND TRUE)
diff --git a/bazel/protobuf.patch b/bazel/protobuf.patch
index e786c7ebe1469..a6318ce8e49ff 100644
--- a/bazel/protobuf.patch
+++ b/bazel/protobuf.patch
@@ -4,15 +4,13 @@ new file mode 100644
index 0000000000..b66101a39a
--- /dev/null
+++ b/third_party/BUILD
-@@ -0,0 +1 @@
+@@ -0,0 +1,1 @@
+exports_files(["six.BUILD", "zlib.BUILD"])
-
-# patching for zlib binding
diff --git a/BUILD b/BUILD
-index efc3d8e7f..746ad4851 100644
+index 7de87f884..3f0fd5362 100644
--- a/BUILD
+++ b/BUILD
-@@ -24,7 +24,7 @@ config_setting(
+@@ -19,7 +19,7 @@ exports_files(["LICENSE"])
# ZLIB configuration
################################################################################
@@ -22,13 +20,13 @@ index efc3d8e7f..746ad4851 100644
################################################################################
# Protobuf Runtime Library
diff --git a/python/google/protobuf/__init__.py b/python/google/protobuf/__init__.py
-index 97ac28028..8b7585d9d 100644
+index cb4740412..91fe69ce5 100644
--- a/python/google/protobuf/__init__.py
+++ b/python/google/protobuf/__init__.py
@@ -31,3 +31,9 @@
# Copyright 2007 Google Inc. All Rights Reserved.
- __version__ = '3.16.0'
+ __version__ = '3.18.0'
+
+if __name__ != '__main__':
+ try:
diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl
index ae04a8918212c..ce8283d3777ee 100644
--- a/bazel/repositories.bzl
+++ b/bazel/repositories.bzl
@@ -52,6 +52,54 @@ _default_envoy_build_config = repository_rule(
},
)
+def _envoy_repo_impl(repository_ctx):
+ """This provides information about the Envoy repository
+
+ You can access the current version and path to the repository in .bzl/BUILD
+ files as follows:
+
+ ```starlark
+ load("@envoy_repo//:version.bzl", "VERSION")
+ ```
+
+ `VERSION` can be used to derive version-specific rules and can be passed
+ to the rules.
+
+ The `VERSION` and also the local `PATH` to the repo can be accessed in
+ python libraries/binaries. By adding `@envoy_repo` to `deps` they become
+ importable through the `envoy_repo` namespace.
+
+ As the `PATH` is local to the machine, it is generally only useful for
+ jobs that will run locally.
+
+ This can be useful for example, for tooling that needs to check the
+ repository, or to run bazel queries that cannot be run within the
+ constraints of a `genquery`.
+
+ """
+ repo_path = repository_ctx.path(repository_ctx.attr.envoy_root).dirname
+ version = repository_ctx.read(repo_path.get_child("VERSION")).strip()
+ repository_ctx.file("version.bzl", "VERSION = '%s'" % version)
+ repository_ctx.file("__init__.py", "PATH = '%s'\nVERSION = '%s'" % (repo_path, version))
+ repository_ctx.file("WORKSPACE", "")
+ repository_ctx.file("BUILD", """
+load("@rules_python//python:defs.bzl", "py_library")
+
+py_library(name = "envoy_repo", srcs = ["__init__.py"], visibility = ["//visibility:public"])
+
+""")
+
+_envoy_repo = repository_rule(
+ implementation = _envoy_repo_impl,
+ attrs = {
+ "envoy_root": attr.label(default = "@envoy//:BUILD"),
+ },
+)
+
+def envoy_repo():
+ if "envoy_repo" not in native.existing_rules().keys():
+ _envoy_repo(name = "envoy_repo")
+
# Python dependencies.
def _python_deps():
# TODO(htuch): convert these to pip3_import.
@@ -100,6 +148,9 @@ def _rust_deps():
external_http_archive("rules_rust")
def envoy_dependencies(skip_targets = []):
+ # Add a binding for repository variables.
+ envoy_repo()
+
# Setup Envoy developer tools.
envoy_dev_binding()
@@ -139,6 +190,7 @@ def envoy_dependencies(skip_targets = []):
_com_github_google_tcmalloc()
_com_github_gperftools_gperftools()
_com_github_grpc_grpc()
+ _com_github_intel_ipp_crypto_crypto_mb()
_com_github_jbeder_yaml_cpp()
_com_github_libevent_libevent()
_com_github_luajit_luajit()
@@ -175,7 +227,11 @@ def envoy_dependencies(skip_targets = []):
external_http_archive("com_github_google_flatbuffers")
external_http_archive("bazel_toolchains")
external_http_archive("bazel_compdb")
- external_http_archive("envoy_build_tools")
+ external_http_archive(
+ name = "envoy_build_tools",
+ patch_args = ["-p1"],
+ patches = ["@envoy//bazel/external:envoy_build_tools.patch"],
+ )
external_http_archive("rules_cc")
external_http_archive("rules_pkg")
@@ -325,6 +381,12 @@ def _com_github_google_libsxg():
actual = "@envoy//bazel/foreign_cc:libsxg",
)
+def _com_github_intel_ipp_crypto_crypto_mb():
+ external_http_archive(
+ name = "com_github_intel_ipp_crypto_crypto_mb",
+ build_file_content = BUILD_ALL_CONTENT,
+ )
+
def _com_github_jbeder_yaml_cpp():
external_http_archive(
name = "com_github_jbeder_yaml_cpp",
@@ -1014,7 +1076,6 @@ filegroup(
external_http_archive(
name = "kafka_source",
build_file_content = KAFKASOURCE_BUILD_CONTENT,
- patches = ["@envoy//bazel/external:kafka_int32.patch"],
)
# This archive provides Kafka C/CPP client used by mesh filter to communicate with upstream
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl
index e6aa827110114..22849d7fd3c27 100644
--- a/bazel/repository_locations.bzl
+++ b/bazel/repository_locations.bzl
@@ -39,21 +39,21 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Apple Rules for Bazel",
project_desc = "Bazel rules for Apple platforms",
project_url = "https://github.com/bazelbuild/rules_apple",
- version = "0.31.2",
- sha256 = "c84962b64d9ae4472adfb01ec2cf1aa73cb2ee8308242add55fa7cc38602d882",
+ version = "0.31.3",
+ sha256 = "0052d452af7742c8f3a4e0929763388a66403de363775db7e90adecb2ba4944b",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"],
- release_date = "2021-05-07",
+ release_date = "2021-08-08",
use_category = ["build"],
),
rules_fuzzing = dict(
project_name = "Fuzzing Rules for Bazel",
project_desc = "Bazel rules for fuzz tests",
project_url = "https://github.com/bazelbuild/rules_fuzzing",
- version = "0.1.3",
- sha256 = "ce99c277c4e9e21f77222757936bf7ffb8823911497db84bdd57a796588fcf01",
+ version = "0.2.0",
+ sha256 = "9b688a77b930e1842312d37b00fbb796b96323a2eb8362b2cfb68e7d6e74f860",
strip_prefix = "rules_fuzzing-{version}",
urls = ["https://github.com/bazelbuild/rules_fuzzing/archive/v{version}.tar.gz"],
- release_date = "2021-04-01",
+ release_date = "2021-07-12",
use_category = ["test_only"],
implied_untracked_deps = [
# This is a repository rule generated to define an OSS-Fuzz fuzzing
@@ -65,11 +65,11 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "envoy-build-tools",
project_desc = "Common build tools shared by the Envoy/UDPA ecosystem",
project_url = "https://github.com/envoyproxy/envoy-build-tools",
- version = "a955a00bed5f35777a83899ee680f8530eee4718",
- sha256 = "b0830dc6fc1e3a095c5d817ca768c89c407bdd71894e1641daf500d28cb269da",
+ version = "55a7bbe700586729bd38231a9a6f3dcd1ff85e7d",
+ sha256 = "11893be9f0334a7e12ffc04b3b034dffe0bb5516d36654011532136c7929ae27",
strip_prefix = "envoy-build-tools-{version}",
urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"],
- release_date = "2021-05-25",
+ release_date = "2021-09-28",
use_category = ["build"],
),
boringssl = dict(
@@ -141,12 +141,12 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "xxHash",
project_desc = "Extremely fast hash algorithm",
project_url = "https://github.com/Cyan4973/xxHash",
- version = "0.7.3",
- sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7",
+ version = "0.8.0",
+ sha256 = "7054c3ebd169c97b64a92d7b994ab63c70dd53a06974f1f630ab782c28db0f4f",
strip_prefix = "xxHash-{version}",
urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
- release_date = "2020-03-05",
+ release_date = "2020-07-27",
cpe = "N/A",
),
com_github_envoyproxy_sqlparser = dict(
@@ -175,6 +175,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"],
release_date = "2011-04-16",
use_category = ["other"],
+ cpe = "cpe:2.3:a:tclap_project:tclap:*",
),
com_github_fmtlib_fmt = dict(
project_name = "fmt",
@@ -240,11 +241,11 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "gperftools",
project_desc = "tcmalloc and profiling libraries",
project_url = "https://github.com/gperftools/gperftools",
- version = "2.8",
- sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e",
+ version = "2.9.1",
+ sha256 = "ea566e528605befb830671e359118c2da718f721c27225cbbc93858c7520fee3",
strip_prefix = "gperftools-{version}",
urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"],
- release_date = "2020-07-06",
+ release_date = "2021-03-03",
use_category = ["dataplane_core", "controlplane"],
cpe = "cpe:2.3:a:gperftools_project:gperftools:*",
),
@@ -260,6 +261,19 @@ REPOSITORY_LOCATIONS_SPEC = dict(
release_date = "2021-06-07",
cpe = "cpe:2.3:a:grpc:grpc:*",
),
+ com_github_intel_ipp_crypto_crypto_mb = dict(
+ project_name = "libipp-crypto",
+ project_desc = "Intel® Integrated Performance Primitives Cryptography",
+ project_url = "https://github.com/intel/ipp-crypto",
+ version = "2021.4",
+ sha256 = "23e250dcf281aa00d186be8dc4e34fa8fc5c95a0895694cd00b33f18af5d60c7",
+ strip_prefix = "ipp-crypto-ippcp_{version}",
+ urls = ["https://github.com/intel/ipp-crypto/archive/ippcp_{version}.tar.gz"],
+ release_date = "2021-10-01",
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.tls.key_providers.cryptomb"],
+ cpe = "cpe:2.3:a:intel:cryptography_for_intel_integrated_performance_primitives:*",
+ ),
com_github_luajit_luajit = dict(
project_name = "LuaJIT",
project_desc = "Just-In-Time compiler for Lua",
@@ -292,12 +306,12 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Nghttp2",
        project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in C",
project_url = "https://nghttp2.org",
- version = "1.42.0",
- sha256 = "884d18a0158908125d58b1b61d475c0325e5a004e3d61a56b5fcc55d5f4b7af5",
+ version = "1.45.1",
+ sha256 = "2379ebeff7b02e14b9a414551d73540ddce5442bbecda2748417e8505916f3e7",
strip_prefix = "nghttp2-{version}",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
- release_date = "2020-11-23",
+ release_date = "2021-09-21",
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
),
io_opentracing_cpp = dict(
@@ -342,7 +356,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
use_category = ["observability_ext"],
extensions = ["envoy.tracers.skywalking"],
release_date = "2021-06-07",
- cpe = "N/A",
+ cpe = "cpe:2.3:a:apache:skywalking:*",
),
com_github_skyapm_cpp2sky = dict(
project_name = "cpp2sky",
@@ -441,12 +455,12 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "zlib-ng",
project_desc = "zlib fork (higher performance)",
project_url = "https://github.com/zlib-ng/zlib-ng",
- version = "b802a303ce8b6c86fbe3f93d59e0a82333768c0c",
- sha256 = "e051eade607ecbbfa2c7ed3087fe53e5d3a58325375e1e28209594138e4aa93d",
+ version = "2.0.5",
+ sha256 = "eca3fe72aea7036c31d00ca120493923c4d5b99fe02e6d3322f7c88dbdcd0085",
strip_prefix = "zlib-ng-{version}",
urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
- release_date = "2020-10-18",
+ release_date = "2021-06-25",
cpe = "N/A",
),
com_github_jbeder_yaml_cpp = dict(
@@ -534,15 +548,15 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "nlohmann JSON",
project_desc = "Fast JSON parser/generator for C++",
project_url = "https://nlohmann.github.io/json",
- version = "3.9.1",
- sha256 = "4cf0df69731494668bdd6460ed8cb269b68de9c19ad8c27abc24cd72605b2d5b",
+ version = "3.10.2",
+ sha256 = "081ed0f9f89805c2d96335c3acfa993b39a0a5b4b4cef7edb68dd2210a13458c",
strip_prefix = "json-{version}",
urls = ["https://github.com/nlohmann/json/archive/v{version}.tar.gz"],
# This will be a replacement for rapidJSON used in extensions and may also be a fast
# replacement for protobuf JSON.
use_category = ["controlplane", "dataplane_core"],
- release_date = "2020-08-06",
- cpe = "cpe:2.3:a:json_project:json:*",
+ release_date = "2021-08-26",
+ cpe = "cpe:2.3:a:json-for-modern-cpp_project:json-for-modern-cpp:*",
),
# This is an external dependency needed while running the
# envoy docker image. A bazel target has been created since
@@ -606,30 +620,31 @@ REPOSITORY_LOCATIONS_SPEC = dict(
urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"],
release_date = "2020-09-10",
use_category = ["test_only"],
+ cpe = "cpe:2.3:a:google:google_test:*",
),
com_google_protobuf = dict(
project_name = "Protocol Buffers",
project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data",
project_url = "https://developers.google.com/protocol-buffers",
- version = "3.16.0",
- sha256 = "d7371dc2d46fddac1af8cb27c0394554b068768fc79ecaf5be1a1863e8ff3392",
+ version = "3.18.0",
+ sha256 = "52b6160ae9266630adb5e96a9fc645215336371a740e87d411bfb63ea2f268a0",
strip_prefix = "protobuf-{version}",
urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
- release_date = "2021-05-07",
+ release_date = "2021-09-15",
cpe = "cpe:2.3:a:google:protobuf:*",
),
grpc_httpjson_transcoding = dict(
project_name = "grpc-httpjson-transcoding",
project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC",
project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding",
- version = "f1591a41318104b7e27a26be12f502b106a16256",
- sha256 = "440baf465096ce1a7152c6d1090a70e871e5ca93b23c6cf9f8cd79f028bf5bb8",
+ version = "3127eeaf889d48b5d2cd870fd910f1ae3e7abca4",
+ sha256 = "f98da3fe9b2539c9fc9b3884e01baa8d2e19ed016bc5f41bed2998781c96ac63",
strip_prefix = "grpc-httpjson-transcoding-{version}",
urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.grpc_json_transcoder"],
- release_date = "2021-05-08",
+ release_date = "2021-09-22",
cpe = "N/A",
),
io_bazel_rules_go = dict(
@@ -640,7 +655,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"],
use_category = ["build", "api"],
- release_date = "2021-03-17",
+ release_date = "2021-03-18",
implied_untracked_deps = [
"com_github_golang_protobuf",
"io_bazel_rules_nogo",
@@ -652,45 +667,42 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "C++ rules for Bazel",
project_desc = "Bazel rules for the C++ language",
project_url = "https://github.com/bazelbuild/rules_cc",
- # TODO(lizan): pin to a point releases when there's a released version.
- version = "b1c40e1de81913a3c40e5948f78719c28152486d",
- sha256 = "71d037168733f26d2a9648ad066ee8da4a34a13f51d24843a42efa6b65c2420f",
- strip_prefix = "rules_cc-{version}",
- urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"],
- release_date = "2020-11-11",
+ version = "0.0.1",
+ sha256 = "4dccbfd22c0def164c8f47458bd50e0c7148f3d92002cdb459c2a96a68498241",
+ urls = ["https://github.com/bazelbuild/rules_cc/releases/download/{version}/rules_cc-{version}.tar.gz"],
+ release_date = "2021-10-07",
use_category = ["build"],
),
rules_foreign_cc = dict(
project_name = "Rules for using foreign build systems in Bazel",
project_desc = "Rules for using foreign build systems in Bazel",
project_url = "https://github.com/bazelbuild/rules_foreign_cc",
- version = "d54c78ab86b40770ee19f0949db9d74a831ab9f0",
- sha256 = "e7446144277c9578141821fc91c55a61df7ae01bda890902f7286f5fd2f6ae46",
+ version = "6c0c2af3d599f4c23117a5e65e811ebab75bb151",
+ sha256 = "8a438371fa742bbbae8b6d995905280053098c5aac28cd434240cd75bc2415a5",
strip_prefix = "rules_foreign_cc-{version}",
urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"],
- release_date = "2020-10-26",
- use_category = ["build"],
+ release_date = "2021-09-22",
+ use_category = ["build", "dataplane_core", "controlplane"],
),
rules_python = dict(
project_name = "Python rules for Bazel",
project_desc = "Bazel rules for the Python language",
project_url = "https://github.com/bazelbuild/rules_python",
- version = "9f597623ccfbe430b0d81c82498e33b80b7aec88",
- sha256 = "8d61fed6974f1e69e09243ca78c9ecf82f50fa3de64bb5df6b0b9061f9c9639b",
- release_date = "2021-09-07",
- strip_prefix = "rules_python-{version}",
- urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"],
+ version = "0.4.0",
+ sha256 = "954aa89b491be4a083304a2cb838019c8b8c3720a7abb9c4cb81ac7a24230cea",
+ release_date = "2021-09-12",
+ urls = ["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"],
use_category = ["build"],
),
rules_pkg = dict(
project_name = "Packaging rules for Bazel",
project_desc = "Bazel rules for the packaging distributions",
project_url = "https://github.com/bazelbuild/rules_pkg",
- version = "0.4.0",
- sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d",
+ version = "0.5.1",
+ sha256 = "a89e203d3cf264e564fcb96b6e06dd70bc0557356eb48400ce4b5d97c2c3720d",
urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/{version}/rules_pkg-{version}.tar.gz"],
use_category = ["build"],
- release_date = "2021-03-03",
+ release_date = "2021-08-18",
),
six = dict(
project_name = "Six",
@@ -722,11 +734,11 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Webassembly Micro Runtime",
project_desc = "A standalone runtime with a small footprint for WebAssembly",
project_url = "https://github.com/bytecodealliance/wasm-micro-runtime",
- version = "b554a9d05d89bb4ef28068b4ae4d0ee6c99bc9db",
- sha256 = "de6b68118c5d4b0d37c9049fa08fae6a850304522ec307f087f0eca4ad8fff57",
+ version = "WAMR-08-10-2021",
+ sha256 = "4016f8330b2ed4fb5d9541ecd5bc4298f324097803a1f270fdbe691389cedfd9",
strip_prefix = "wasm-micro-runtime-{version}",
urls = ["https://github.com/bytecodealliance/wasm-micro-runtime/archive/{version}.tar.gz"],
- release_date = "2021-07-06",
+ release_date = "2021-08-10",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wamr"],
cpe = "N/A",
@@ -755,7 +767,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
release_date = "2021-04-05",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
- cpe = "N/A",
+ cpe = "cpe:2.3:a:bytecodealliance:wasmtime:*",
),
com_github_wasm_c_api = dict(
project_name = "wasm-c-api",
@@ -790,8 +802,8 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "curl",
project_desc = "Library for transferring data with URLs",
project_url = "https://curl.haxx.se",
- version = "7.77.0",
- sha256 = "b0a3428acb60fa59044c4d0baae4e4fc09ae9af1d8a3aa84b2e3fbcd99841f77",
+ version = "7.79.1",
+ sha256 = "370b11201349816287fb0ccc995e420277fbfcaf76206e309b3f60f0eda090c2",
strip_prefix = "curl-{version}",
urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"],
use_category = ["dataplane_ext", "observability_ext"],
@@ -801,7 +813,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
"envoy.grpc_credentials.aws_iam",
"envoy.tracers.opencensus",
],
- release_date = "2021-05-26",
+ release_date = "2021-09-22",
cpe = "cpe:2.3:a:haxx:libcurl:*",
),
com_googlesource_chromium_v8 = dict(
@@ -822,36 +834,35 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "QUICHE",
        project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google's implementation of QUIC and related protocols",
project_url = "https://github.com/google/quiche",
- version = "8d5eb27ee2e3f009f7180e8ace0ff97830d9c3e9",
- sha256 = "88cc71556b96bbec953a716a12c26f88b8af4d5e9a83cf3ec38aba4caed6bf52",
- # Static snapshot of https://quiche.googlesource.com/quiche/+archive/{version}.tar.gz
+ version = "72442c9337bac2fa6865e223e56fe9aac90d84a8",
+ sha256 = "ababed9c36cb16e43e7f1d508ae4a6ea89831752944fded3fb4fd2b3bead0bad",
urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"],
strip_prefix = "quiche-{version}",
use_category = ["dataplane_core"],
- release_date = "2021-08-31",
+ release_date = "2021-10-06",
cpe = "N/A",
),
com_googlesource_googleurl = dict(
project_name = "Chrome URL parsing library",
project_desc = "Chrome URL parsing library",
project_url = "https://quiche.googlesource.com/googleurl",
- # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz.
- version = "ef0d23689e240e6c8de4c3a5296b209128c87373",
- sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176",
+ # Static snapshot of https://quiche.googlesource.com/googleurl/+archive/561705e0066ff11e6cb97b8092f1547835beeb92.tar.gz.
+ version = "561705e0066ff11e6cb97b8092f1547835beeb92",
+ sha256 = "7ce00768fea1fa4c7bf658942f13e41c9ba30e9cff931a6cda2f9fd02289f673",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
extensions = [],
- release_date = "2020-07-30",
+ release_date = "2021-08-31",
cpe = "N/A",
),
com_google_cel_cpp = dict(
project_name = "Common Expression Language (CEL) C++ library",
project_desc = "Common Expression Language (CEL) C++ library",
project_url = "https://opensource.google/projects/cel",
- version = "0.6.1",
- sha256 = "d001494f1aa7d88172af944233fac3d7f83d9183d66590aa787aa2a35aab0440",
+ version = "89d81b2d2c24943b6e4fd5e8fc321099c2ab6d3f",
+ sha256 = "1408ef31e77ed847b420ff108da9652ad1702401008f2a75b671fba860a9707d",
strip_prefix = "cel-cpp-{version}",
- urls = ["https://github.com/google/cel-cpp/archive/v{version}.tar.gz"],
+ urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
@@ -862,18 +873,19 @@ REPOSITORY_LOCATIONS_SPEC = dict(
"envoy.filters.network.rbac",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
+ "envoy.rbac.matchers.upstream_ip_port",
],
- release_date = "2021-06-28",
+ release_date = "2021-10-07",
cpe = "N/A",
),
com_github_google_flatbuffers = dict(
project_name = "FlatBuffers",
project_desc = "Cross platform serialization library architected for maximum memory efficiency",
project_url = "https://github.com/google/flatbuffers",
- version = "a83caf5910644ba1c421c002ef68e42f21c15f9f",
- sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a",
+ version = "2.0.0",
+ sha256 = "9ddb9031798f4f8754d00fca2f1a68ecf9d0f83dfac7239af1311e4fd9a565c4",
strip_prefix = "flatbuffers-{version}",
- urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"],
+ urls = ["https://github.com/google/flatbuffers/archive/v{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
@@ -884,20 +896,21 @@ REPOSITORY_LOCATIONS_SPEC = dict(
"envoy.filters.network.rbac",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
+ "envoy.rbac.matchers.upstream_ip_port",
],
- release_date = "2020-04-02",
- cpe = "N/A",
+ release_date = "2021-05-10",
+ cpe = "cpe:2.3:a:google:flatbuffers:*",
),
com_googlesource_code_re2 = dict(
project_name = "RE2",
project_desc = "RE2, a regular expression library",
project_url = "https://github.com/google/re2",
- version = "2020-07-06",
- sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f",
+ version = "2021-09-01",
+ sha256 = "42a2e1d56b5de252f5d418dc1cc0848e9e52ca22b056453988b18c6195ec7f8d",
strip_prefix = "re2-{version}",
urls = ["https://github.com/google/re2/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
- release_date = "2020-07-06",
+ release_date = "2021-09-01",
cpe = "N/A",
),
# Included to access FuzzedDataProvider.h. This is compiler agnostic but
@@ -907,13 +920,14 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "compiler-rt",
project_desc = "LLVM compiler runtime library",
project_url = "https://compiler-rt.llvm.org",
- version = "11.0.1",
- sha256 = "087be3f1116e861cd969c9b0b0903c27028b52eaf45157276f50a9c2500687fc",
+ version = "12.0.1",
+ sha256 = "b4c8d5f2a802332987c1c0a95b5afb35b1a66a96fe44add4e4ed4792c4cba0a4",
# Only allow peeking at fuzzer related files for now.
strip_prefix = "compiler-rt-{version}.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"],
- release_date = "2021-01-06",
+ release_date = "2021-07-09",
use_category = ["test_only"],
+ cpe = "cpe:2.3:a:llvm:compiler-rt:*",
),
upb = dict(
project_name = "upb",
@@ -931,13 +945,13 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Kafka (source)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
- version = "2.4.1",
- sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd",
+ version = "2.8.1",
+ sha256 = "c3fd89257e056e11b5e1b09d4bbd8332ce5abfdfa7c7a5bb6a5cfe9860fcc688",
strip_prefix = "kafka-{version}/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/{version}.zip"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.kafka_broker", "envoy.filters.network.kafka_mesh"],
- release_date = "2020-03-03",
+ release_date = "2021-09-14",
cpe = "cpe:2.3:a:apache:kafka:*",
),
edenhill_librdkafka = dict(
@@ -957,11 +971,11 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Kafka (server binary)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
- version = "2.4.1",
- sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a",
- strip_prefix = "kafka_2.12-{version}",
- urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.12-{version}.tgz"],
- release_date = "2020-03-12",
+ version = "2.8.1",
+ sha256 = "4888b03e3b27dd94f2d830ce3bae9d7d98b0ccee3a5d30c919ccb60e0fa1f139",
+ strip_prefix = "kafka_2.13-{version}",
+ urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.13-{version}.tgz"],
+ release_date = "2021-09-14",
use_category = ["test_only"],
),
kafka_python_client = dict(
diff --git a/bazel/utils.bzl b/bazel/utils.bzl
new file mode 100644
index 0000000000000..0961f00eb446a
--- /dev/null
+++ b/bazel/utils.bzl
@@ -0,0 +1,18 @@
+load("@bazel_skylib//rules:write_file.bzl", "write_file")
+
+def json_data(
+ name,
+ data,
+ visibility = ["//visibility:public"],
+ **kwargs):
+ """Write a bazel object to a file
+
+ The provided `data` object should be json serializable.
+ """
+ write_file(
+ name = name,
+ out = "%s.json" % name,
+ content = json.encode(data).split("\n"),
+ visibility = visibility,
+ **kwargs
+ )
diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows
index 6f9514569c1c4..edeff92dd4ebd 100644
--- a/ci/Dockerfile-envoy-windows
+++ b/ci/Dockerfile-envoy-windows
@@ -4,7 +4,8 @@ ARG BUILD_TAG=ltsc2019
FROM $BUILD_OS:$BUILD_TAG
USER ContainerAdministrator
-RUN net user /add "EnvoyUser"
+RUN net accounts /MaxPWAge:unlimited
+RUN net user /add "EnvoyUser" /expires:never
RUN net localgroup "Network Configuration Operators" "EnvoyUser" /add
RUN mkdir "C:\\Program\ Files\\envoy"
diff --git a/ci/do_ci.sh b/ci/do_ci.sh
index d507d36993da0..4334c2304b34c 100755
--- a/ci/do_ci.sh
+++ b/ci/do_ci.sh
@@ -135,7 +135,7 @@ function bazel_binary_build() {
fi
# Build su-exec utility
- bazel build external:su-exec
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" external:su-exec
cp_binary_for_image_build "${BINARY_TYPE}" "${COMPILE_TYPE}" "${EXE_NAME}"
}
@@ -150,7 +150,7 @@ function bazel_contrib_binary_build() {
function run_process_test_result() {
if [[ -z "$CI_SKIP_PROCESS_TEST_RESULTS" ]] && [[ $(find "$TEST_TMPDIR" -name "*_attempt.xml" 2> /dev/null) ]]; then
echo "running flaky test reporting script"
- "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET"
+ bazel run "${BAZEL_BUILD_OPTIONS[@]}" //ci/flaky_test:process_xml "$CI_TARGET"
else
echo "no flaky test results found"
fi
@@ -368,24 +368,19 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then
"${ENVOY_SRCDIR}"/tools/api/validate_structure.py
echo "Validate Golang protobuf generation..."
"${ENVOY_SRCDIR}"/tools/api/generate_go_protobuf.py
- echo "Testing API and API Boosting..."
- bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \
- @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/...
+ echo "Testing API..."
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//test/... @envoy_api//tools/... \
+ @envoy_api//tools:tap2pcap_test
echo "Building API..."
- bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/...
- echo "Testing API boosting (golden C++ tests)..."
- # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet.
- BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 "${ENVOY_SRCDIR}"/tools/api_boost/api_boost_test.py
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//envoy/...
exit 0
elif [[ "$CI_TARGET" == "bazel.api_compat" ]]; then
- echo "Building buf..."
- bazel build @com_github_bufbuild_buf//:buf
- BUF_PATH=$(realpath "bazel-source/external/com_github_bufbuild_buf/bin/buf")
echo "Checking API for breaking changes to protobuf backwards compatibility..."
BASE_BRANCH_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh)
COMMIT_TITLE=$(git log -n 1 --pretty='format:%C(auto)%h (%s, %ad)' "${BASE_BRANCH_REF}")
echo -e "\tUsing base commit ${COMMIT_TITLE}"
- "${ENVOY_SRCDIR}"/tools/api_proto_breaking_change_detector/detector_ci.sh "${BUF_PATH}" "${BASE_BRANCH_REF}"
+ # BAZEL_BUILD_OPTIONS needed for setting the repository_cache param.
+ bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/api_proto_breaking_change_detector:detector_ci "${BASE_BRANCH_REF}"
exit 0
elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then
setup_clang_toolchain
@@ -462,7 +457,7 @@ elif [[ "$CI_TARGET" == "deps" ]]; then
"${ENVOY_SRCDIR}"/ci/check_repository_locations.sh
# Run pip requirements tests
- bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pip_check "${ENVOY_SRCDIR}"
+ bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pip_check
exit 0
elif [[ "$CI_TARGET" == "cve_scan" ]]; then
@@ -481,9 +476,6 @@ elif [[ "$CI_TARGET" == "tooling" ]]; then
echo "Run protoxform test"
BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/protoxform/protoxform_test.sh
- echo "Run merge active shadow test"
- bazel test "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:merge_active_shadow_test
-
echo "check_format_test..."
"${ENVOY_SRCDIR}"/tools/code_format/check_format_test_helper.sh --log=WARN
diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh
index 9eb97f75afe62..77cef1e83ecf0 100755
--- a/ci/docker_ci.sh
+++ b/ci/docker_ci.sh
@@ -38,8 +38,13 @@ build_args() {
TYPE=$1
FILE_SUFFIX="${TYPE/-debug/}"
FILE_SUFFIX="${FILE_SUFFIX/-contrib/}"
+ FILE_SUFFIX="${FILE_SUFFIX/-ltsc2022/}"
printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}"
+ if [[ "${TYPE}" == *-windows* ]]; then
+ printf ' --build-arg BUILD_OS=%s --build-arg BUILD_TAG=%s' "${WINDOWS_IMAGE_BASE}" "${WINDOWS_IMAGE_TAG}"
+ fi
+
if [[ "${TYPE}" == *-contrib* ]]; then
printf ' --build-arg ENVOY_BINARY=envoy-contrib'
fi
@@ -103,7 +108,7 @@ push_images() {
PLATFORM="$(build_platforms "${TYPE}")"
# docker buildx doesn't do push with default builder
docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . --push || \
- docker push "${BUILD_TAG}"
+ docker push "${BUILD_TAG}"
}
MAIN_BRANCH="refs/heads/main"
@@ -125,7 +130,7 @@ DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}"
if is_windows; then
- BUILD_TYPES=("-windows")
+ BUILD_TYPES=("-${WINDOWS_BUILD_TYPE}")
# BuildKit is not available for Windows images, use standard build command
BUILD_COMMAND=("build")
else
diff --git a/ci/flaky_test/BUILD b/ci/flaky_test/BUILD
new file mode 100644
index 0000000000000..7cbc182ec9e0a
--- /dev/null
+++ b/ci/flaky_test/BUILD
@@ -0,0 +1,17 @@
+load("@rules_python//python:defs.bzl", "py_binary")
+load("//bazel:envoy_build_system.bzl", "envoy_package")
+load("@base_pip3//:requirements.bzl", "requirement")
+
+licenses(["notice"]) # Apache 2
+
+envoy_package()
+
+py_binary(
+ name = "process_xml",
+ srcs = ["process_xml.py"],
+ deps = [
+ "@envoy_repo",
+ requirement("pygithub"),
+ requirement("slackclient"),
+ ],
+)
diff --git a/ci/flaky_test/process_xml.py b/ci/flaky_test/process_xml.py
index 9eae5129275c8..943d710c4dbb3 100755
--- a/ci/flaky_test/process_xml.py
+++ b/ci/flaky_test/process_xml.py
@@ -1,17 +1,26 @@
#!/usr/bin/env python3
-import subprocess
import os
+import ssl
+import subprocess
+import sys
+from typing import Iterable
import xml.etree.ElementTree as ET
+
import slack
from slack.errors import SlackApiError
-import sys
-import ssl
+
+import envoy_repo
well_known_timeouts = [60, 300, 900, 3600]
section_delimiter = "---------------------------------------------------------------------------------------------------\n"
+def run_in_repo(command: Iterable) -> str:
+ """Run a command in the repo root"""
+ return subprocess.check_output(command, encoding="utf-8", cwd=envoy_repo.PATH)
+
+
# Returns a boolean indicating if a test passed.
def did_test_pass(file):
tree = ET.parse(file)
@@ -192,7 +201,7 @@ def get_git_info(CI_TARGET):
elif os.getenv('BUILD_REASON'):
ret += "Build reason: {}\n".format(os.environ['BUILD_REASON'])
- output = subprocess.check_output(['git', 'log', '--format=%H', '-n', '1'], encoding='utf-8')
+ output = run_in_repo(['git', 'log', '--format=%H', '-n', '1'])
ret += "Commmit: {}/commit/{}".format(os.environ['REPO_URI'], output)
build_id = os.environ['BUILD_URI'].split('/')[-1]
@@ -200,23 +209,23 @@ def get_git_info(CI_TARGET):
ret += "\n"
- remotes = subprocess.check_output(['git', 'remote'], encoding='utf-8').splitlines()
+ remotes = run_in_repo(['git', 'remote']).splitlines()
if ("origin" in remotes):
- output = subprocess.check_output(['git', 'remote', 'get-url', 'origin'], encoding='utf-8')
+ output = run_in_repo(['git', 'remote', 'get-url', 'origin'])
ret += "Origin: {}".format(output.replace('.git', ''))
if ("upstream" in remotes):
- output = subprocess.check_output(['git', 'remote', 'get-url', 'upstream'], encoding='utf-8')
+ output = run_in_repo(['git', 'remote', 'get-url', 'upstream'])
ret += "Upstream: {}".format(output.replace('.git', ''))
- output = subprocess.check_output(['git', 'describe', '--all', '--always'], encoding='utf-8')
+ output = run_in_repo(['git', 'describe', '--all', '--always'])
ret += "Latest ref: {}".format(output)
ret += "\n"
ret += "Last commit:\n"
- output = subprocess.check_output(['git', 'show', '-s'], encoding='utf-8')
+ output = run_in_repo(['git', 'show', '-s'])
for line in output.splitlines():
ret += "\t" + line + "\n"
@@ -225,7 +234,7 @@ def get_git_info(CI_TARGET):
return ret
-if __name__ == "__main__":
+def main():
CI_TARGET = ""
if len(sys.argv) == 2:
CI_TARGET = sys.argv[1]
@@ -286,3 +295,10 @@ def get_git_info(CI_TARGET):
print('No flaky tests found.\n')
os.remove(os.environ["TMP_OUTPUT_PROCESS_XML"])
+
+
+if __name__ == "__main__":
+ if os.getenv("ENVOY_BUILD_ARCH") == "aarch64":
+        os.environ["MULTIDICT_NO_EXTENSIONS"] = "1"
+        os.environ["YARL_NO_EXTENSIONS"] = "1"
+ main()
diff --git a/ci/flaky_test/requirements.txt b/ci/flaky_test/requirements.txt
deleted file mode 100644
index 1e9f11f4cfa12..0000000000000
--- a/ci/flaky_test/requirements.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-aiohttp==3.7.4.post0 \
- --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \
- --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \
- --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 \
- --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \
- --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \
- --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \
- --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \
- --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \
- --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \
- --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \
- --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \
- --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \
- --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \
- --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \
- --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \
- --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \
- --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \
- --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \
- --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \
- --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \
- --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \
- --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \
- --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \
- --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \
- --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \
- --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \
- --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \
- --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \
- --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \
- --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \
- --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \
- --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \
- --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \
- --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \
- --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \
- --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \
- --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf
-async-timeout==3.0.1 \
- --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \
- --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3
-attrs==21.2.0 \
- --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \
- --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb
-chardet==4.0.0 \
- --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 \
- --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa
-idna==3.1 \
- --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
- --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
-idna_ssl==1.1.0 \
- --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c
-multidict==5.1.0 \
- --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
- --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
- --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
- --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
- --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
- --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
- --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
- --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
- --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
- --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
- --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
- --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
- --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
- --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
- --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
- --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
- --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
- --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
- --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
- --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
- --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
- --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
- --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
- --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
- --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
- --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
- --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
- --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
- --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
- --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
- --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
- --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
- --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
- --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
- --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \
- --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
- --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5
-slackclient==2.9.3 \
- --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 \
- --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965
-typing-extensions==3.10.0.2 \
- --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \
- --hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 \
- --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e
-wheel==0.37.0 \
- --hash=sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd \
- --hash=sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad
-yarl==1.6.3 \
- --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
- --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
- --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
- --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
- --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
- --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
- --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
- --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
- --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
- --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
- --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
- --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
- --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
- --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
- --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
- --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
- --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
- --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
- --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
- --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
- --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \
- --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
- --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
- --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
- --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
- --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
- --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
- --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
- --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
- --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
- --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
- --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
- --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
- --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
- --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
- --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
- --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10
diff --git a/ci/flaky_test/run_process_xml.sh b/ci/flaky_test/run_process_xml.sh
deleted file mode 100755
index 38496128bb913..0000000000000
--- a/ci/flaky_test/run_process_xml.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-export ENVOY_SRCDIR=${ENVOY_SRCDIR:-.}
-
-# shellcheck source=tools/shell_utils.sh
-. "${ENVOY_SRCDIR}"/tools/shell_utils.sh
-
-if [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then
- export MULTIDICT_NO_EXTENSIONS=1
- export YARL_NO_EXTENSIONS=1
-fi
-
-python_venv process_xml "$1"
diff --git a/ci/format_pre.sh b/ci/format_pre.sh
index 831e57ca4a298..08808386b16b2 100755
--- a/ci/format_pre.sh
+++ b/ci/format_pre.sh
@@ -53,7 +53,7 @@ CURRENT=configs
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //configs:example_configs_validation
CURRENT=python
-bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix "$(pwd)"
+bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix
CURRENT=extensions
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/extensions:extensions_check
diff --git a/ci/osx-build-config/extensions_build_config.bzl b/ci/osx-build-config/extensions_build_config.bzl
index 40c8fee0685e8..379d6748e5a95 100644
--- a/ci/osx-build-config/extensions_build_config.bzl
+++ b/ci/osx-build-config/extensions_build_config.bzl
@@ -14,3 +14,4 @@ EXTENSIONS = {
WINDOWS_EXTENSIONS = {}
EXTENSION_CONFIG_VISIBILITY = ["//:extension_config"]
EXTENSION_PACKAGE_VISIBILITY = ["//:extension_library"]
+CONTRIB_EXTENSION_PACKAGE_VISIBILITY = ["//:contrib_library"]
diff --git a/ci/repokitteh/modules/newcontributor.star b/ci/repokitteh/modules/newcontributor.star
deleted file mode 100644
index 865e5e90c7624..0000000000000
--- a/ci/repokitteh/modules/newcontributor.star
+++ /dev/null
@@ -1,43 +0,0 @@
-
-NEW_CONTRIBUTOR_MESSAGE = """
-Hi @%s, welcome and thank you for your contribution.
-
-We will try to review your Pull Request as quickly as possible.
-
-In the meantime, please take a look at the [contribution guidelines](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md) if you have not done so already.
-
-"""
-
-DRAFT_MESSAGE = """
-As a reminder, PRs marked as draft will not be automatically assigned reviewers,
-or be handled by maintainer-oncall triage.
-
-Please mark your PR as ready when you want it to be reviewed!
-"""
-
-
-def get_pr_author_association(issue_number):
- return github.call(
- method="GET",
- path="repos/envoyproxy/envoy/pulls/%s" % issue_number)["json"]["author_association"]
-
-def is_newcontributor(issue_number):
- return (
- get_pr_author_association(issue_number)
- in ["NONE", "FIRST_TIME_CONTRIBUTOR", "FIRST_TIMER"])
-
-def should_message_newcontributor(action, issue_number):
- return (
- action == 'opened'
- and is_newcontributor(issue_number))
-
-def send_newcontributor_message(sender):
- github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender)
-
-def _pr(action, issue_number, sender, config, draft):
- if should_message_newcontributor(action, issue_number):
- send_newcontributor_message(sender)
- if action == 'opened' and draft:
- github.issue_create_comment(DRAFT_MESSAGE)
-
-handlers.pull_request(func=_pr)
diff --git a/ci/repokitteh/modules/newpr.star b/ci/repokitteh/modules/newpr.star
index 865e5e90c7624..4c4797f442262 100644
--- a/ci/repokitteh/modules/newpr.star
+++ b/ci/repokitteh/modules/newpr.star
@@ -34,7 +34,14 @@ def should_message_newcontributor(action, issue_number):
def send_newcontributor_message(sender):
github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender)
-def _pr(action, issue_number, sender, config, draft):
+def is_envoy_repo(repo_owner, repo_name):
+ return (
+ repo_owner == "envoyproxy"
+ and repo_name == "envoy")
+
+def _pr(action, issue_number, sender, config, draft, repo_owner, repo_name):
+ if not is_envoy_repo(repo_owner, repo_name):
+ return
if should_message_newcontributor(action, issue_number):
send_newcontributor_message(sender)
if action == 'opened' and draft:
diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh
index 68d8afc4d94c3..e72082a9cf0b5 100755
--- a/ci/upload_gcs_artifact.sh
+++ b/ci/upload_gcs_artifact.sh
@@ -18,9 +18,13 @@ if [ ! -d "${SOURCE_DIRECTORY}" ]; then
exit 1
fi
-if [[ "$BUILD_REASON" == "PullRequest" ]]; then
- # non-main upload to the last commit sha (first 7 chars) in the developers branch
- UPLOAD_PATH="$(git log --pretty=%P -n 1 | cut -d' ' -f2 | head -c7)"
+if [[ "$BUILD_REASON" == "PullRequest" ]] || [[ "$TARGET_SUFFIX" == "docs" ]]; then
+ # upload to the last commit sha (first 7 chars), either
+ # - docs build on main
+ # -> https://storage.googleapis.com/envoy-postsubmit/$UPLOAD_PATH/docs/envoy-docs-rst.tar.gz
+ # - PR build (commit sha from the developers branch)
+ # -> https://storage.googleapis.com/envoy-pr/$UPLOAD_PATH/$TARGET_SUFFIX
+ UPLOAD_PATH="$(git rev-parse HEAD | head -c7)"
else
UPLOAD_PATH="${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}"
fi
diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh
index 67ff0eb409949..eed32c1218868 100755
--- a/ci/windows_ci_steps.sh
+++ b/ci/windows_ci_steps.sh
@@ -103,7 +103,7 @@ if [[ $TEST_TARGETS == "//test/..." ]]; then
bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" $TEST_TARGETS --test_tag_filters=-skip_on_windows,-fails_on_${FAIL_GROUP} --build_tests_only
echo "running flaky test reporting script"
- "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET"
+ bazel run "${BAZEL_BUILD_OPTIONS[@]}" //ci/flaky_test:process_xml "$CI_TARGET"
# Build tests that are known flaky or failing to ensure no compilation regressions
bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=fails_on_${FAIL_GROUP} --build_tests_only
diff --git a/configs/encapsulate_in_http1_connect.yaml b/configs/encapsulate_in_http1_connect.yaml
index a11a997880327..f8f9a6bc4a668 100644
--- a/configs/encapsulate_in_http1_connect.yaml
+++ b/configs/encapsulate_in_http1_connect.yaml
@@ -1,7 +1,7 @@
# This configuration takes incoming data on port 10000 and encapsulates it in a CONNECT
# request which is sent upstream port 10001.
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst
-# and running `curl --x 127.0.0.1:10000 https://www.google.com`
+# and running `curl -x 127.0.0.1:10000 https://www.google.com`
admin:
address:
diff --git a/configs/encapsulate_in_http2_connect.yaml b/configs/encapsulate_in_http2_connect.yaml
index abe84ecc86e29..1f985457ab2dd 100644
--- a/configs/encapsulate_in_http2_connect.yaml
+++ b/configs/encapsulate_in_http2_connect.yaml
@@ -1,7 +1,7 @@
# This configuration takes incoming data on port 10000 and encapsulates it in a CONNECT
# request which is sent upstream port 10001.
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst
-# and running `curl --x 127.0.0.1:10000 https://www.google.com`
+# and running `curl -x 127.0.0.1:10000 https://www.google.com`
admin:
address:
diff --git a/configs/encapsulate_in_http2_post.yaml b/configs/encapsulate_in_http2_post.yaml
index 61353a97a886e..d3979c393ad7f 100644
--- a/configs/encapsulate_in_http2_post.yaml
+++ b/configs/encapsulate_in_http2_post.yaml
@@ -1,7 +1,7 @@
# This configuration takes incoming data on port 10000 and encapsulates it in a POST
# request which is sent upstream port 10001.
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst
-# and running `curl --x 127.0.0.1:10000 https://www.google.com`
+# and running `curl -x 127.0.0.1:10000 https://www.google.com`
admin:
address:
diff --git a/configs/google_com_auto_http3_upstream_proxy.yaml b/configs/google_com_auto_http3_upstream_proxy.yaml
new file mode 100644
index 0000000000000..8767f87a59ef8
--- /dev/null
+++ b/configs/google_com_auto_http3_upstream_proxy.yaml
@@ -0,0 +1,72 @@
+# An example config which accepts HTTP/1 requests over TCP and forwards them to google using HTTP/3
+admin:
+ address:
+ socket_address:
+ protocol: TCP
+ address: 0.0.0.0
+ port_value: 9901
+static_resources:
+ listeners:
+ - name: listener_0
+ address:
+ socket_address:
+ protocol: TCP
+ address: 0.0.0.0
+ port_value: 10000
+ filter_chains:
+ - filters:
+ - name: envoy.filters.network.http_connection_manager
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+ scheme_header_transformation:
+ scheme_to_overwrite: https
+ stat_prefix: ingress_http
+ route_config:
+ name: local_route
+ virtual_hosts:
+ - name: local_service
+ domains: ["*"]
+ routes:
+ - match:
+ prefix: "/"
+ route:
+ host_rewrite_literal: www.google.com
+ cluster: service_google
+ http_filters:
+ - name: alternate_protocols_cache
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.filters.http.alternate_protocols_cache.v3.FilterConfig
+ alternate_protocols_cache_options:
+ name: default_alternate_protocols_cache
+ - name: envoy.filters.http.router
+ clusters:
+ - name: service_google
+ connect_timeout: 30s
+ type: LOGICAL_DNS
+ # Comment out the following line to test on v6 networks
+ dns_lookup_family: V4_ONLY
+ lb_policy: ROUND_ROBIN
+ load_assignment:
+ cluster_name: service_google
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: www.google.com
+ port_value: 443
+ typed_extension_protocol_options:
+ envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
+ "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
+ auto_config:
+ http3_protocol_options: {}
+ alternate_protocols_cache_options:
+ name: default_alternate_protocols_cache
+ common_http_protocol_options:
+ idle_timeout: 1s
+ transport_socket:
+ name: envoy.transport_sockets.quic
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport
+ upstream_tls_context:
+ sni: www.google.com
diff --git a/configs/requirements.txt b/configs/requirements.txt
index 1cd69909b9962..7e65450464ab1 100644
--- a/configs/requirements.txt
+++ b/configs/requirements.txt
@@ -1,6 +1,6 @@
-Jinja2==3.0.1 \
- --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \
- --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4
+Jinja2==3.0.2 \
+ --hash=sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c \
+ --hash=sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45
MarkupSafe==2.0.1 \
--hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \
--hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \
diff --git a/contrib/BUILD b/contrib/BUILD
index aa0691c6142a8..ceedb6dfcaacb 100644
--- a/contrib/BUILD
+++ b/contrib/BUILD
@@ -1,6 +1,14 @@
+load("//bazel:utils.bzl", "json_data")
+load(":contrib_build_config.bzl", "CONTRIB_EXTENSIONS")
+
licenses(["notice"]) # Apache 2
exports_files([
"extensions_metadata.yaml",
"contrib_build_config.bzl",
])
+
+json_data(
+ name = "contrib_extensions_build_config",
+ data = CONTRIB_EXTENSIONS,
+)
diff --git a/contrib/all_contrib_extensions.bzl b/contrib/all_contrib_extensions.bzl
index 5a450825fd033..3862d7976a1f1 100644
--- a/contrib/all_contrib_extensions.bzl
+++ b/contrib/all_contrib_extensions.bzl
@@ -1,4 +1,14 @@
load(":contrib_build_config.bzl", "CONTRIB_EXTENSIONS")
-def envoy_all_contrib_extensions():
- return [v + "_envoy_extension" for v in CONTRIB_EXTENSIONS.values()]
+# linter requires indirection for @bazel_tools definitions
+def envoy_contrib_linux_x86_64_constraints():
+ return [
+ "@bazel_tools//platforms:linux",
+ "@bazel_tools//platforms:x86_64",
+ ]
+
+ARM64_SKIP_CONTRIB_TARGETS = ["envoy.tls.key_providers.cryptomb"]
+PPC_SKIP_CONTRIB_TARGETS = ["envoy.tls.key_providers.cryptomb"]
+
+def envoy_all_contrib_extensions(denylist = []):
+ return [v + "_envoy_extension" for k, v in CONTRIB_EXTENSIONS.items() if k not in denylist]
diff --git a/contrib/contrib_build_config.bzl b/contrib/contrib_build_config.bzl
index f27001d971be8..3a9987a910e49 100644
--- a/contrib/contrib_build_config.bzl
+++ b/contrib/contrib_build_config.bzl
@@ -16,4 +16,17 @@ CONTRIB_EXTENSIONS = {
"envoy.filters.network.mysql_proxy": "//contrib/mysql_proxy/filters/network/source:config",
"envoy.filters.network.postgres_proxy": "//contrib/postgres_proxy/filters/network/source:config",
"envoy.filters.network.rocketmq_proxy": "//contrib/rocketmq_proxy/filters/network/source:config",
+
+ #
+ # Sip proxy
+ #
+
+ "envoy.filters.network.sip_proxy": "//contrib/sip_proxy/filters/network/source:config",
+ "envoy.filters.sip.router": "//contrib/sip_proxy/filters/network/source/router:config",
+
+ #
+ # Private key providers
+ #
+
+ "envoy.tls.key_providers.cryptomb": "//contrib/cryptomb/private_key_providers/source:config",
}
diff --git a/contrib/cryptomb/private_key_providers/source/BUILD b/contrib/cryptomb/private_key_providers/source/BUILD
new file mode 100644
index 0000000000000..9e30bef90b5a0
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/BUILD
@@ -0,0 +1,113 @@
+load(
+ "//bazel:envoy_build_system.bzl",
+ "envoy_cc_contrib_extension",
+ "envoy_cc_library",
+ "envoy_cmake",
+ "envoy_contrib_package",
+)
+load(
+ "//contrib:all_contrib_extensions.bzl",
+ "envoy_contrib_linux_x86_64_constraints",
+)
+
+licenses(["notice"]) # Apache 2
+
+envoy_contrib_package()
+
+envoy_cmake(
+ name = "ipp-crypto",
+ cache_entries = {
+ "BORINGSSL": "on",
+ },
+ defines = [
+ "OPENSSL_USE_STATIC_LIBS=TRUE",
+ ],
+ lib_source = "@com_github_intel_ipp_crypto_crypto_mb//:all",
+ out_static_libs = ["libcrypto_mb.a"],
+ tags = ["skip_on_windows"],
+ target_compatible_with = envoy_contrib_linux_x86_64_constraints(),
+ visibility = ["//visibility:private"],
+ working_directory = "sources/ippcp/crypto_mb",
+ deps = ["@boringssl//:ssl"],
+)
+
+envoy_cc_library(
+ name = "ipp_crypto_wrapper_lib",
+ hdrs = ["ipp_crypto.h"] + select({
+ "//bazel:linux_x86_64": [
+ "ipp_crypto_impl.h",
+ ],
+ "//conditions:default": [
+ ],
+ }),
+ defines = select({
+ "//bazel:linux_x86_64": [],
+ "//conditions:default": [
+ "IPP_CRYPTO_DISABLED=1",
+ ],
+ }),
+ external_deps = ["ssl"],
+ repository = "@envoy",
+ deps = select({
+ "//bazel:linux_x86_64": [
+ ":ipp-crypto",
+ ],
+ "//conditions:default": [],
+ }),
+)
+
+envoy_cc_library(
+ name = "cryptomb_private_key_provider_lib",
+ srcs = [
+ "cryptomb_private_key_provider.cc",
+ ],
+ hdrs = [
+ "cryptomb_private_key_provider.h",
+ ],
+ external_deps = ["ssl"],
+ repository = "@envoy",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":ipp_crypto_wrapper_lib",
+ "//envoy/api:api_interface",
+ "//envoy/event:dispatcher_interface",
+ "//envoy/registry",
+ "//envoy/server:transport_socket_config_interface",
+ "//envoy/singleton:manager_interface",
+ "//envoy/ssl/private_key:private_key_config_interface",
+ "//envoy/ssl/private_key:private_key_interface",
+ "//source/common/common:logger_lib",
+ "//source/common/common:thread_lib",
+ "//source/common/config:datasource_lib",
+ "@envoy_api//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg_cc_proto",
+ ],
+)
+
+envoy_cc_contrib_extension(
+ name = "config",
+ srcs = ["config.cc"],
+ hdrs = ["config.h"],
+ defines = select({
+ "//bazel:linux_x86_64": [],
+ "//conditions:default": [
+ "IPP_CRYPTO_DISABLED=1",
+ ],
+ }),
+ deps = [
+ "//envoy/registry",
+ "//envoy/ssl/private_key:private_key_config_interface",
+ "//envoy/ssl/private_key:private_key_interface",
+ "//source/common/common:logger_lib",
+ "//source/common/config:utility_lib",
+ "//source/common/protobuf:utility_lib",
+ "@envoy_api//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg_cc_proto",
+ "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto",
+ ] + select({
+ "//bazel:linux_x86_64": [
+ ":cryptomb_private_key_provider_lib",
+ ":ipp_crypto_wrapper_lib",
+ ],
+ "//conditions:default": [
+ ],
+ }),
+)
diff --git a/contrib/cryptomb/private_key_providers/source/config.cc b/contrib/cryptomb/private_key_providers/source/config.cc
new file mode 100644
index 0000000000000..713dc733b6fb7
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/config.cc
@@ -0,0 +1,57 @@
+#include "contrib/cryptomb/private_key_providers/source/config.h"
+
+#include <memory>
+
+#include "envoy/registry/registry.h"
+#include "envoy/server/transport_socket_config.h"
+
+#include "source/common/common/logger.h"
+#include "source/common/config/utility.h"
+#include "source/common/protobuf/message_validator_impl.h"
+#include "source/common/protobuf/utility.h"
+
+#ifndef IPP_CRYPTO_DISABLED
+#include "contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h"
+#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h"
+#endif
+
+#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.h"
+#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.validate.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+Ssl::PrivateKeyMethodProviderSharedPtr
+CryptoMbPrivateKeyMethodFactory::createPrivateKeyMethodProviderInstance(
+ const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& proto_config,
+ Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) {
+ ProtobufTypes::MessagePtr message =
+ std::make_unique<envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig>();
+
+ Config::Utility::translateOpaqueConfig(proto_config.typed_config(),
+ ProtobufMessage::getNullValidationVisitor(), *message);
+ const envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig
+ conf =
+ MessageUtil::downcastAndValidate<const envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig&>(
+ *message, private_key_provider_context.messageValidationVisitor());
+ Ssl::PrivateKeyMethodProviderSharedPtr provider = nullptr;
+#ifdef IPP_CRYPTO_DISABLED
+ throw EnvoyException("X86_64 architecture is required for cryptomb provider.");
+#else
+ IppCryptoSharedPtr ipp = std::make_shared<IppCryptoImpl>();
+ provider =
+ std::make_shared(conf, private_key_provider_context, ipp);
+#endif
+ return provider;
+}
+
+REGISTER_FACTORY(CryptoMbPrivateKeyMethodFactory, Ssl::PrivateKeyMethodProviderInstanceFactory);
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/source/config.h b/contrib/cryptomb/private_key_providers/source/config.h
new file mode 100644
index 0000000000000..d72a395da189d
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/config.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h"
+#include "envoy/ssl/private_key/private_key.h"
+#include "envoy/ssl/private_key/private_key_config.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+class CryptoMbPrivateKeyMethodFactory : public Ssl::PrivateKeyMethodProviderInstanceFactory,
+ public Logger::Loggable<Logger::Id::connection> {
+public:
+ // Ssl::PrivateKeyMethodProviderInstanceFactory
+ Ssl::PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProviderInstance(
+ const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& message,
+ Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) override;
+ std::string name() const override { return "cryptomb"; };
+};
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.cc b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.cc
new file mode 100644
index 0000000000000..78312b9636f9b
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.cc
@@ -0,0 +1,606 @@
+#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h"
+
+#include <memory>
+
+#include "envoy/registry/registry.h"
+#include "envoy/server/transport_socket_config.h"
+
+#include "source/common/config/datasource.h"
+
+#include "openssl/ec.h"
+#include "openssl/ssl.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+CryptoMbContext::CryptoMbContext(Event::Dispatcher& dispatcher,
+                                 Ssl::PrivateKeyConnectionCallbacks& cb)
+    : status_(RequestStatus::Retry), dispatcher_(dispatcher), cb_(cb) {}
+
+// Defers the completion notification to the next dispatcher iteration so the
+// SSL state machine is re-entered from a clean stack, not from inside the
+// batch-processing code.
+void CryptoMbContext::scheduleCallback(enum RequestStatus status) {
+  schedulable_ = dispatcher_.createSchedulableCallback([this, status]() -> void {
+    // The status can't be set beforehand, because the callback asserts
+    // if someone else races to call doHandshake() and the status goes to
+    // HandshakeComplete.
+    setStatus(status);
+    this->cb_.onPrivateKeyMethodComplete();
+  });
+  schedulable_->scheduleCallbackNextIteration();
+}
+
+// Captures the CRT parameters from the RSA key and copies the (already
+// padded) input into an internal buffer sized to hold both input and output.
+// Returns false if the key is missing or lacks any CRT component.
+bool CryptoMbRsaContext::rsaInit(const uint8_t* in, size_t in_len) {
+  if (rsa_ == nullptr) {
+    return false;
+  }
+
+  // Initialize the values with the RSA key.
+  size_t in_buf_size = in_len;
+  out_len_ = RSA_size(rsa_.get());
+
+  if (out_len_ > in_buf_size) {
+    in_buf_size = out_len_;
+  }
+
+  RSA_get0_key(rsa_.get(), &n_, &e_, &d_);
+  RSA_get0_factors(rsa_.get(), &p_, &q_);
+  RSA_get0_crt_params(rsa_.get(), &dmp1_, &dmq1_, &iqmp_);
+
+  // All CRT components are required by the multi-buffer private key API.
+  if (p_ == nullptr || q_ == nullptr || dmp1_ == nullptr || dmq1_ == nullptr || iqmp_ == nullptr) {
+    return false;
+  }
+
+  in_buf_ = std::make_unique<uint8_t[]>(in_buf_size);
+  memcpy(in_buf_.get(), in, in_len); // NOLINT(safe-memcpy)
+
+  return true;
+}
+
+namespace {
+
+// Computes the digest of `in` using `md` into `hash`/`hash_len`.
+// Returns 1 on success, 0 on any EVP failure (OpenSSL-style result code).
+int calculateDigest(const EVP_MD* md, const uint8_t* in, size_t in_len, unsigned char* hash,
+ unsigned int* hash_len) {
+ bssl::ScopedEVP_MD_CTX ctx;
+
+ // Calculate the message digest for signing.
+ if (!EVP_DigestInit_ex(ctx.get(), md, nullptr) || !EVP_DigestUpdate(ctx.get(), in, in_len) ||
+ !EVP_DigestFinal_ex(ctx.get(), hash, hash_len)) {
+ return 0;
+ }
+ return 1;
+}
+
+// ECDSA signing is done synchronously here (no multi-buffer batching in this
+// path): hash the input, sign with the connection's EC key, and write the
+// signature into `out`.
+ssl_private_key_result_t ecdsaPrivateKeySignInternal(CryptoMbPrivateKeyConnection* ops,
+                                                     uint8_t* out, size_t* out_len, size_t max_out,
+                                                     uint16_t signature_algorithm,
+                                                     const uint8_t* in, size_t in_len) {
+  unsigned char hash[EVP_MAX_MD_SIZE];
+  unsigned int hash_len;
+  unsigned int out_len_unsigned;
+
+  if (ops == nullptr) {
+    return ssl_private_key_failure;
+  }
+
+  const EVP_MD* md = SSL_get_signature_algorithm_digest(signature_algorithm);
+  if (md == nullptr) {
+    return ssl_private_key_failure;
+  }
+
+  if (!calculateDigest(md, in, in_len, hash, &hash_len)) {
+    return ssl_private_key_failure;
+  }
+
+  bssl::UniquePtr<EVP_PKEY> pkey = ops->getPrivateKey();
+  if (pkey == nullptr) {
+    return ssl_private_key_failure;
+  }
+
+  // The key type attached to the connection must match the negotiated
+  // signature algorithm.
+  if (EVP_PKEY_id(pkey.get()) != SSL_get_signature_algorithm_key_type(signature_algorithm)) {
+    return ssl_private_key_failure;
+  }
+
+  bssl::UniquePtr<EC_KEY> ec_key(EVP_PKEY_get1_EC_KEY(pkey.get()));
+  if (ec_key == nullptr) {
+    return ssl_private_key_failure;
+  }
+
+  if (max_out < ECDSA_size(ec_key.get())) {
+    return ssl_private_key_failure;
+  }
+
+  // Borrow "out" because it has been already initialized to the max_out size.
+  if (!ECDSA_sign(0, hash, hash_len, out, &out_len_unsigned, ec_key.get())) {
+    return ssl_private_key_failure;
+  }
+
+  if (out_len_unsigned > max_out) {
+    return ssl_private_key_failure;
+  }
+  *out_len = out_len_unsigned;
+  return ssl_private_key_success;
+}
+
+// BoringSSL entry point: looks up the per-connection state stored in the SSL
+// ex-data slot and delegates to the internal implementation.
+ssl_private_key_result_t ecdsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t* out_len,
+                                             size_t max_out, uint16_t signature_algorithm,
+                                             const uint8_t* in, size_t in_len) {
+  return ssl == nullptr ? ssl_private_key_failure
+                        : ecdsaPrivateKeySignInternal(
+                              static_cast<CryptoMbPrivateKeyConnection*>(SSL_get_ex_data(
+                                  ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())),
+                              out, out_len, max_out, signature_algorithm, in, in_len);
+}
+
+// Decrypt is not a valid operation for ECDSA keys; always fails.
+ssl_private_key_result_t ecdsaPrivateKeyDecrypt(SSL*, uint8_t*, size_t*, size_t, const uint8_t*,
+ size_t) {
+ // Expecting to get only signing requests.
+ return ssl_private_key_failure;
+}
+
+// Prepares an RSA signing operation: hashes the input, applies PSS or PKCS1
+// padding, and enqueues the padded message for batched multi-buffer
+// processing. Returns ssl_private_key_retry on success (result is delivered
+// asynchronously via privateKeyComplete).
+ssl_private_key_result_t rsaPrivateKeySignInternal(CryptoMbPrivateKeyConnection* ops, uint8_t*,
+                                                   size_t*, size_t, uint16_t signature_algorithm,
+                                                   const uint8_t* in, size_t in_len) {
+
+  ssl_private_key_result_t status = ssl_private_key_failure;
+  if (ops == nullptr) {
+    return status;
+  }
+
+  bssl::UniquePtr<EVP_PKEY> pkey = ops->getPrivateKey();
+
+  // Check if the SSL instance has correct data attached to it.
+  if (EVP_PKEY_id(pkey.get()) != SSL_get_signature_algorithm_key_type(signature_algorithm)) {
+    return status;
+  }
+
+  bssl::UniquePtr<RSA> rsa(EVP_PKEY_get1_RSA(pkey.get()));
+  if (rsa == nullptr) {
+    return status;
+  }
+
+  const EVP_MD* md = SSL_get_signature_algorithm_digest(signature_algorithm);
+  if (md == nullptr) {
+    return status;
+  }
+
+  unsigned char hash[EVP_MAX_MD_SIZE];
+  unsigned int hash_len;
+  if (!calculateDigest(md, in, in_len, hash, &hash_len)) {
+    return status;
+  }
+
+  uint8_t* msg;
+  size_t msg_len;
+  int prefix_allocated = 0;
+
+  // Add RSA padding to the the hash. Supported types are `PSS` and `PKCS1`.
+  if (SSL_is_signature_algorithm_rsa_pss(signature_algorithm)) {
+    msg_len = RSA_size(rsa.get());
+    // We have to do manual memory management here, because BoringSSL tells in `prefix_allocated`
+    // variable whether or not memory needs to be freed.
+    msg = static_cast<uint8_t*>(OPENSSL_malloc(msg_len));
+    if (msg == nullptr) {
+      return status;
+    }
+    prefix_allocated = 1;
+    if (!RSA_padding_add_PKCS1_PSS_mgf1(rsa.get(), msg, hash, md, nullptr, -1)) {
+      OPENSSL_free(msg);
+      return status;
+    }
+  } else {
+    if (!RSA_add_pkcs1_prefix(&msg, &msg_len, &prefix_allocated, EVP_MD_type(md), hash, hash_len)) {
+      if (prefix_allocated) {
+        OPENSSL_free(msg);
+      }
+      return status;
+    }
+  }
+
+  // Create MB context which will be used for this particular
+  // signing/decryption.
+  CryptoMbRsaContextSharedPtr mb_ctx =
+      std::make_shared<CryptoMbRsaContext>(std::move(pkey), ops->dispatcher_, ops->cb_);
+
+  // rsaInit() copies `msg`, so the padded buffer can be freed in every path.
+  if (!mb_ctx->rsaInit(msg, msg_len)) {
+    if (prefix_allocated) {
+      OPENSSL_free(msg);
+    }
+    return status;
+  }
+
+  if (prefix_allocated) {
+    OPENSSL_free(msg);
+  }
+
+  ops->addToQueue(mb_ctx);
+  status = ssl_private_key_retry;
+  return status;
+}
+
+// BoringSSL entry point for RSA signing; resolves the per-connection state
+// from the SSL ex-data slot.
+ssl_private_key_result_t rsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t* out_len, size_t max_out,
+                                           uint16_t signature_algorithm, const uint8_t* in,
+                                           size_t in_len) {
+  return ssl == nullptr ? ssl_private_key_failure
+                        : rsaPrivateKeySignInternal(
+                              static_cast<CryptoMbPrivateKeyConnection*>(SSL_get_ex_data(
+                                  ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())),
+                              out, out_len, max_out, signature_algorithm, in, in_len);
+}
+
+// Enqueues a raw RSA private decryption (TLS 1.2 RSA key exchange) for
+// batched multi-buffer processing. Returns ssl_private_key_retry on success.
+ssl_private_key_result_t rsaPrivateKeyDecryptInternal(CryptoMbPrivateKeyConnection* ops, uint8_t*,
+                                                      size_t*, size_t, const uint8_t* in,
+                                                      size_t in_len) {
+
+  if (ops == nullptr) {
+    return ssl_private_key_failure;
+  }
+
+  bssl::UniquePtr<EVP_PKEY> pkey = ops->getPrivateKey();
+
+  // Check if the SSL instance has correct data attached to it.
+  if (pkey == nullptr) {
+    return ssl_private_key_failure;
+  }
+
+  CryptoMbRsaContextSharedPtr mb_ctx =
+      std::make_shared<CryptoMbRsaContext>(std::move(pkey), ops->dispatcher_, ops->cb_);
+
+  if (!mb_ctx->rsaInit(in, in_len)) {
+    return ssl_private_key_failure;
+  }
+
+  ops->addToQueue(mb_ctx);
+  return ssl_private_key_retry;
+}
+
+// BoringSSL entry point for RSA decryption; resolves the per-connection
+// state from the SSL ex-data slot.
+ssl_private_key_result_t rsaPrivateKeyDecrypt(SSL* ssl, uint8_t* out, size_t* out_len,
+                                              size_t max_out, const uint8_t* in, size_t in_len) {
+  return ssl == nullptr ? ssl_private_key_failure
+                        : rsaPrivateKeyDecryptInternal(
+                              static_cast<CryptoMbPrivateKeyConnection*>(SSL_get_ex_data(
+                                  ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())),
+                              out, out_len, max_out, in, in_len);
+}
+
+// Polled by BoringSSL after a retry: copies the finished multi-buffer result
+// into `out`, or reports retry/failure depending on the context status.
+ssl_private_key_result_t privateKeyCompleteInternal(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out) {
+ if (ops == nullptr) {
+ return ssl_private_key_failure;
+ }
+
+ // Check if the MB operation is ready yet. This can happen if someone calls
+ // the top-level SSL function too early. The op status is only set from this
+ // thread.
+ if (ops->mb_ctx_->getStatus() == RequestStatus::Retry) {
+ return ssl_private_key_retry;
+ }
+
+ // If this point is reached, the MB processing must be complete.
+
+ // See if the operation failed.
+ if (ops->mb_ctx_->getStatus() != RequestStatus::Success) {
+ ops->logWarnMsg("private key operation failed.");
+ return ssl_private_key_failure;
+ }
+
+ *out_len = ops->mb_ctx_->out_len_;
+
+ // Result must fit into the caller-provided buffer before copying.
+ if (*out_len > max_out) {
+ return ssl_private_key_failure;
+ }
+
+ memcpy(out, ops->mb_ctx_->out_buf_, *out_len); // NOLINT(safe-memcpy)
+
+ return ssl_private_key_success;
+}
+
+// BoringSSL "complete" entry point; resolves the per-connection state from
+// the SSL ex-data slot.
+ssl_private_key_result_t privateKeyComplete(SSL* ssl, uint8_t* out, size_t* out_len,
+                                            size_t max_out) {
+  return ssl == nullptr ? ssl_private_key_failure
+                        : privateKeyCompleteInternal(
+                              static_cast<CryptoMbPrivateKeyConnection*>(SSL_get_ex_data(
+                                  ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex())),
+                              out, out_len, max_out);
+}
+
+} // namespace
+
+// External linking, meant for testing without SSL context.
+// These wrappers bypass the SSL ex-data lookup and take the connection
+// object directly.
+ssl_private_key_result_t privateKeyCompleteForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out) {
+ return privateKeyCompleteInternal(ops, out, out_len, max_out);
+}
+ssl_private_key_result_t ecdsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out,
+ uint16_t signature_algorithm, const uint8_t* in,
+ size_t in_len) {
+ return ecdsaPrivateKeySignInternal(ops, out, out_len, max_out, signature_algorithm, in, in_len);
+}
+ssl_private_key_result_t rsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out,
+ uint16_t signature_algorithm, const uint8_t* in,
+ size_t in_len) {
+ return rsaPrivateKeySignInternal(ops, out, out_len, max_out, signature_algorithm, in, in_len);
+}
+ssl_private_key_result_t rsaPrivateKeyDecryptForTest(CryptoMbPrivateKeyConnection* ops,
+ uint8_t* out, size_t* out_len, size_t max_out,
+ const uint8_t* in, size_t in_len) {
+ return rsaPrivateKeyDecryptInternal(ops, out, out_len, max_out, in, in_len);
+}
+
+CryptoMbQueue::CryptoMbQueue(std::chrono::milliseconds poll_delay, enum KeyType type, int keysize,
+                             IppCryptoSharedPtr ipp, Event::Dispatcher& d)
+    : us_(std::chrono::duration_cast<std::chrono::microseconds>(poll_delay)), type_(type),
+      key_size_(keysize), ipp_(ipp),
+      timer_(d.createTimer([this]() -> void { processRequests(); })) {
+  // Reserve room for a full batch so pushes never reallocate mid-batch.
+  request_queue_.reserve(MULTIBUFF_BATCH);
+}
+
+void CryptoMbQueue::startTimer() { timer_->enableHRTimer(us_); }
+
+void CryptoMbQueue::stopTimer() { timer_->disableTimer(); }
+
+// Queues a request; processes immediately once a full batch of eight is
+// collected, otherwise relies on the poll timer started by the first request.
+void CryptoMbQueue::addAndProcessEightRequests(CryptoMbContextSharedPtr mb_ctx) {
+ // Add the request to the processing queue.
+ ASSERT(request_queue_.size() < MULTIBUFF_BATCH);
+ request_queue_.push_back(mb_ctx);
+
+ if (request_queue_.size() == MULTIBUFF_BATCH) {
+ // There are eight requests in the queue and we can process them.
+ stopTimer();
+ ENVOY_LOG(debug, "processing directly 8 requests");
+ processRequests();
+ } else if (request_queue_.size() == 1) {
+ // First request in the queue, start the queue timer.
+ startTimer();
+ }
+}
+
+// Drains the queue. Only RSA operations are batch-processed here; for any
+// other queue type the pending requests are simply cleared.
+void CryptoMbQueue::processRequests() {
+ if (type_ == KeyType::Rsa) {
+ processRsaRequests();
+ }
+ request_queue_.clear();
+}
+
+// Runs up to eight queued RSA private key operations through the multi-buffer
+// CRT API, then validates each result with a `Lenstra` check (re-encrypt with
+// the public key and compare against the original input) before scheduling
+// each context's completion callback.
+void CryptoMbQueue::processRsaRequests() {
+
+  const unsigned char* rsa_priv_from[MULTIBUFF_BATCH] = {nullptr};
+  unsigned char* rsa_priv_to[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_lenstra_e[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_lenstra_n[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_priv_p[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_priv_q[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_priv_dmp1[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_priv_dmq1[MULTIBUFF_BATCH] = {nullptr};
+  const BIGNUM* rsa_priv_iqmp[MULTIBUFF_BATCH] = {nullptr};
+
+  /* Build arrays of pointers for call */
+  for (unsigned req_num = 0; req_num < request_queue_.size(); req_num++) {
+    CryptoMbRsaContextSharedPtr mb_ctx =
+        std::static_pointer_cast<CryptoMbRsaContext>(request_queue_[req_num]);
+    rsa_priv_from[req_num] = mb_ctx->in_buf_.get();
+    rsa_priv_to[req_num] = mb_ctx->out_buf_;
+    rsa_priv_p[req_num] = mb_ctx->p_;
+    rsa_priv_q[req_num] = mb_ctx->q_;
+    rsa_priv_dmp1[req_num] = mb_ctx->dmp1_;
+    rsa_priv_dmq1[req_num] = mb_ctx->dmq1_;
+    rsa_priv_iqmp[req_num] = mb_ctx->iqmp_;
+  }
+
+  ENVOY_LOG(debug, "Multibuffer RSA process {} requests", request_queue_.size());
+
+  uint32_t rsa_sts =
+      ipp_->mbxRsaPrivateCrtSslMb8(rsa_priv_from, rsa_priv_to, rsa_priv_p, rsa_priv_q,
+                                   rsa_priv_dmp1, rsa_priv_dmq1, rsa_priv_iqmp, key_size_);
+
+  enum RequestStatus status[MULTIBUFF_BATCH] = {RequestStatus::Retry};
+
+  for (unsigned req_num = 0; req_num < request_queue_.size(); req_num++) {
+    CryptoMbRsaContextSharedPtr mb_ctx =
+        std::static_pointer_cast<CryptoMbRsaContext>(request_queue_[req_num]);
+    if (ipp_->mbxGetSts(rsa_sts, req_num)) {
+      ENVOY_LOG(debug, "Multibuffer RSA request {} success", req_num);
+      status[req_num] = RequestStatus::Success;
+    } else {
+      ENVOY_LOG(debug, "Multibuffer RSA request {} failure", req_num);
+      status[req_num] = RequestStatus::Error;
+    }
+
+    // `Lenstra` check (validate that we get the same result back).
+    rsa_priv_from[req_num] = rsa_priv_to[req_num];
+    rsa_priv_to[req_num] = mb_ctx->lenstra_to_;
+    rsa_lenstra_e[req_num] = mb_ctx->e_;
+    rsa_lenstra_n[req_num] = mb_ctx->n_;
+  }
+
+  rsa_sts =
+      ipp_->mbxRsaPublicSslMb8(rsa_priv_from, rsa_priv_to, rsa_lenstra_e, rsa_lenstra_n, key_size_);
+
+  for (unsigned req_num = 0; req_num < request_queue_.size(); req_num++) {
+    CryptoMbRsaContextSharedPtr mb_ctx =
+        std::static_pointer_cast<CryptoMbRsaContext>(request_queue_[req_num]);
+    enum RequestStatus ctx_status;
+    if (ipp_->mbxGetSts(rsa_sts, req_num)) {
+      if (CRYPTO_memcmp(mb_ctx->in_buf_.get(), rsa_priv_to[req_num], mb_ctx->out_len_) != 0) {
+        status[req_num] = RequestStatus::Error;
+      }
+      // else keep the previous status from the private key operation
+    } else {
+      status[req_num] = RequestStatus::Error;
+    }
+
+    ctx_status = status[req_num];
+    mb_ctx->scheduleCallback(ctx_status);
+  }
+}
+
+CryptoMbPrivateKeyConnection::CryptoMbPrivateKeyConnection(Ssl::PrivateKeyConnectionCallbacks& cb,
+                                                           Event::Dispatcher& dispatcher,
+                                                           bssl::UniquePtr<EVP_PKEY> pkey,
+                                                           CryptoMbQueue& queue)
+    : queue_(queue), dispatcher_(dispatcher), cb_(cb), pkey_(std::move(pkey)) {}
+
+// Attaches a per-connection state object to the SSL ex-data slot. The `ops`
+// object is owned through that slot and deleted in unregisterPrivateKeyMethod().
+void CryptoMbPrivateKeyMethodProvider::registerPrivateKeyMethod(
+ SSL* ssl, Ssl::PrivateKeyConnectionCallbacks& cb, Event::Dispatcher& dispatcher) {
+
+ if (SSL_get_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex()) != nullptr) {
+ throw EnvoyException("Not registering the CryptoMb provider twice for same context");
+ }
+
+ ASSERT(tls_->currentThreadRegistered(), "Current thread needs to be registered.");
+
+ // Each worker thread has its own queue (set up in the constructor).
+ CryptoMbQueue& queue = tls_->get()->queue_;
+
+ CryptoMbPrivateKeyConnection* ops =
+ new CryptoMbPrivateKeyConnection(cb, dispatcher, bssl::UpRef(pkey_), queue);
+ SSL_set_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex(), ops);
+}
+
+// Records the in-flight context (read back by privateKeyComplete) and hands
+// it to the per-thread batching queue.
+void CryptoMbPrivateKeyConnection::addToQueue(CryptoMbContextSharedPtr mb_ctx) {
+ mb_ctx_ = mb_ctx;
+ queue_.addAndProcessEightRequests(mb_ctx_);
+}
+
+// Always reports non-FIPS.
+bool CryptoMbPrivateKeyMethodProvider::checkFips() {
+ // `ipp-crypto` library is not fips-certified at the moment
+ // (https://github.com/intel/ipp-crypto#certification).
+ return false;
+}
+
+// Returns the method table (sign/decrypt/complete) built in the constructor.
+Ssl::BoringSslPrivateKeyMethodSharedPtr
+CryptoMbPrivateKeyMethodProvider::getBoringSslPrivateKeyMethod() {
+ return method_;
+}
+
+// Detaches and destroys the per-connection state created in
+// registerPrivateKeyMethod().
+void CryptoMbPrivateKeyMethodProvider::unregisterPrivateKeyMethod(SSL* ssl) {
+  CryptoMbPrivateKeyConnection* ops = static_cast<CryptoMbPrivateKeyConnection*>(
+      SSL_get_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex()));
+  SSL_set_ex_data(ssl, CryptoMbPrivateKeyMethodProvider::connectionIndex(), nullptr);
+  delete ops;
+}
+
+CryptoMbPrivateKeyMethodProvider::CryptoMbPrivateKeyMethodProvider(
+ const envoy::extensions::private_key_providers::cryptomb::v3alpha::
+ CryptoMbPrivateKeyMethodConfig& conf,
+ Server::Configuration::TransportSocketFactoryContext& factory_context, IppCryptoSharedPtr ipp)
+ : api_(factory_context.api()),
+ tls_(ThreadLocal::TypedSlot::makeUnique(factory_context.threadLocal())) {
+
+ if (!ipp->mbxIsCryptoMbApplicable(0)) {
+ throw EnvoyException("Multi-buffer CPU instructions not available.");
+ }
+
+ std::chrono::milliseconds poll_delay =
+ std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(conf, poll_delay, 200));
+
+ std::string private_key =
+ Config::DataSource::read(conf.private_key(), false, factory_context.api());
+
+ bssl::UniquePtr bio(
+ BIO_new_mem_buf(const_cast(private_key.data()), private_key.size()));
+
+ bssl::UniquePtr pkey(PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr, nullptr));
+ if (pkey == nullptr) {
+ throw EnvoyException("Failed to read private key.");
+ }
+
+ method_ = std::make_shared();
+
+ int key_size;
+
+ if (EVP_PKEY_id(pkey.get()) == EVP_PKEY_RSA) {
+ ENVOY_LOG(debug, "CryptoMb key type: RSA");
+ key_type_ = KeyType::Rsa;
+
+ method_->sign = rsaPrivateKeySign;
+ method_->decrypt = rsaPrivateKeyDecrypt;
+ method_->complete = privateKeyComplete;
+
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
+
+ switch (RSA_bits(rsa)) {
+ case 1024:
+ key_size = 1024;
+ break;
+ case 2048:
+ key_size = 2048;
+ break;
+ case 3072:
+ key_size = 3072;
+ break;
+ case 4096:
+ key_size = 4096;
+ break;
+ default:
+ throw EnvoyException("Only RSA keys of 1024, 2048, 3072, and 4096 bits are supported.");
+ }
+
+ // If longer keys are ever supported, remember to change the signature buffer to be larger.
+ ASSERT(key_size / 8 <= CryptoMbContext::MAX_SIGNATURE_SIZE);
+
+ BIGNUM e_check;
+ // const BIGNUMs, memory managed by BoringSSL in RSA key structure.
+ const BIGNUM *e, *n, *d;
+ RSA_get0_key(rsa, &n, &e, &d);
+ BN_init(&e_check);
+ BN_add_word(&e_check, 65537);
+ if (e == nullptr || BN_ucmp(e, &e_check) != 0) {
+ BN_free(&e_check);
+ throw EnvoyException("Only RSA keys with \"e\" parameter value 65537 are allowed, because "
+ "we can validate the signatures using multi-buffer instructions.");
+ }
+ BN_free(&e_check);
+ } else if (EVP_PKEY_id(pkey.get()) == EVP_PKEY_EC) {
+ ENVOY_LOG(debug, "CryptoMb key type: ECDSA");
+ key_type_ = KeyType::Ec;
+
+ method_->sign = ecdsaPrivateKeySign;
+ method_->decrypt = ecdsaPrivateKeyDecrypt;
+ method_->complete = privateKeyComplete;
+
+ const EC_GROUP* ecdsa_group = EC_KEY_get0_group(EVP_PKEY_get0_EC_KEY(pkey.get()));
+ if (ecdsa_group == nullptr) {
+ throw EnvoyException("Invalid ECDSA key.");
+ }
+ BIGNUMConstPtr order(EC_GROUP_get0_order(ecdsa_group));
+ if (EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) {
+ throw EnvoyException("Only P-256 ECDSA keys are supported.");
+ }
+ if (BN_num_bits(order.get()) < 160) {
+ throw EnvoyException("Too few significant bits.");
+ }
+ key_size = EC_GROUP_get_degree(ecdsa_group);
+ ASSERT(key_size == 256);
+ } else {
+ throw EnvoyException("Not supported key type, only EC and RSA are supported.");
+ }
+
+ pkey_ = std::move(pkey);
+
+ enum KeyType key_type = key_type_;
+
+ // Create a single queue for every worker thread to avoid locking.
+ tls_->set([poll_delay, key_type, key_size, ipp](Event::Dispatcher& d) {
+ ENVOY_LOG(debug, "Created CryptoMb Queue for thread {}", d.name());
+ return std::make_shared(poll_delay, key_type, key_size, ipp, d);
+ });
+}
+
+namespace {
+// Allocates a process-wide SSL ex-data index used to attach the
+// per-connection state object to each SSL handle.
+int createIndex() {
+ int index = SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);
+ RELEASE_ASSERT(index >= 0, "Failed to get SSL user data index.");
+ return index;
+}
+} // namespace
+
+// Lazily-initialized singleton index, shared by all provider instances.
+int CryptoMbPrivateKeyMethodProvider::connectionIndex() {
+ CONSTRUCT_ON_FIRST_USE(int, createIndex());
+}
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h
new file mode 100644
index 0000000000000..b1a8a65bcc16a
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h
@@ -0,0 +1,189 @@
+#pragma once
+
+#include "envoy/api/api.h"
+#include "envoy/event/dispatcher.h"
+#include "envoy/ssl/private_key/private_key.h"
+#include "envoy/ssl/private_key/private_key_config.h"
+#include "envoy/thread_local/thread_local.h"
+
+#include "source/common/common/c_smart_ptr.h"
+#include "source/common/common/logger.h"
+
+#include "contrib/cryptomb/private_key_providers/source/ipp_crypto.h"
+#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+namespace {
+// Deleter that intentionally does nothing: the BIGNUM is owned by the
+// BoringSSL key structure, not by the smart pointer.
+void dontFreeBN(const BIGNUM*) {}
+} // namespace
+using BIGNUMConstPtr = CSmartPtr<const BIGNUM, dontFreeBN>;
+
+enum class RequestStatus { Retry, Success, Error };
+enum class KeyType { Rsa, Ec };
+
+// CryptoMbContext holds the actual data to be signed or encrypted. It also has a
+// reference to the worker thread dispatcher for communicating that it has
+// has ran the `AVX-512` code and the result is ready to be used.
+class CryptoMbContext {
+public:
+  static constexpr ssize_t MAX_SIGNATURE_SIZE = 512;
+
+  CryptoMbContext(Event::Dispatcher& dispatcher, Ssl::PrivateKeyConnectionCallbacks& cb);
+  virtual ~CryptoMbContext() = default;
+
+  void setStatus(RequestStatus status) { status_ = status; }
+  enum RequestStatus getStatus() { return status_; }
+  // Posts `status` and the completion notification to the next dispatcher
+  // iteration.
+  void scheduleCallback(enum RequestStatus status);
+
+  // Buffer length is the same as the max signature length (4096 bits = 512 bytes)
+  unsigned char out_buf_[MAX_SIGNATURE_SIZE];
+  // The real length of the signature.
+  size_t out_len_{};
+  // Incoming data buffer.
+  std::unique_ptr<uint8_t[]> in_buf_;
+
+private:
+  // Whether the decryption / signing is ready.
+  enum RequestStatus status_ {};
+
+  Event::Dispatcher& dispatcher_;
+  Ssl::PrivateKeyConnectionCallbacks& cb_;
+  // For scheduling the callback to the next dispatcher cycle.
+  Event::SchedulableCallbackPtr schedulable_{};
+};
+
+// CryptoMbRsaContext is a CryptoMbContext which holds the extra RSA parameters and has
+// custom initialization function. It also has a separate buffer for RSA result
+// verification.
+class CryptoMbRsaContext : public CryptoMbContext {
+public:
+  CryptoMbRsaContext(bssl::UniquePtr<EVP_PKEY> pkey, Event::Dispatcher& dispatcher,
+                     Ssl::PrivateKeyConnectionCallbacks& cb)
+      : CryptoMbContext(dispatcher, cb), rsa_(EVP_PKEY_get1_RSA(pkey.get())) {}
+  bool rsaInit(const uint8_t* in, size_t in_len);
+
+  // RSA key.
+  bssl::UniquePtr<RSA> rsa_{};
+  // RSA parameters. Const pointers, which will contain values whose memory is
+  // managed within BoringSSL RSA key structure, so not wrapped in smart
+  // pointers.
+  const BIGNUM* d_{};
+  const BIGNUM* e_{};
+  const BIGNUM* n_{};
+  const BIGNUM* p_{};
+  const BIGNUM* q_{};
+  const BIGNUM* dmp1_{};
+  const BIGNUM* dmq1_{};
+  const BIGNUM* iqmp_{};
+
+  // Buffer for `Lenstra` check.
+  unsigned char lenstra_to_[MAX_SIGNATURE_SIZE];
+};
+
+using CryptoMbContextSharedPtr = std::shared_ptr<CryptoMbContext>;
+using CryptoMbRsaContextSharedPtr = std::shared_ptr<CryptoMbRsaContext>;
+
+// CryptoMbQueue maintains the request queue and is able to process it.
+class CryptoMbQueue : public Logger::Loggable<Logger::Id::connection> {
+public:
+  static constexpr uint32_t MULTIBUFF_BATCH = 8;
+
+  CryptoMbQueue(std::chrono::milliseconds poll_delay, enum KeyType type, int keysize,
+                IppCryptoSharedPtr ipp, Event::Dispatcher& d);
+  void addAndProcessEightRequests(CryptoMbContextSharedPtr mb_ctx);
+
+private:
+  void processRequests();
+  void processRsaRequests();
+  void startTimer();
+  void stopTimer();
+
+  // Polling delay.
+  std::chrono::microseconds us_{};
+
+  // Queue for the requests.
+  std::vector<CryptoMbContextSharedPtr> request_queue_;
+
+  // Key size and key type allowed for this particular queue.
+  const enum KeyType type_;
+  int key_size_{};
+
+  // Thread local data slot.
+  ThreadLocal::SlotPtr slot_{};
+
+  // Crypto operations library interface.
+  IppCryptoSharedPtr ipp_{};
+
+  // Timer to trigger queue processing if eight requests are not received in time.
+  Event::TimerPtr timer_{};
+};
+
+// CryptoMbPrivateKeyConnection maintains the data needed by a given SSL
+// connection.
+class CryptoMbPrivateKeyConnection : public Logger::Loggable<Logger::Id::connection> {
+public:
+  CryptoMbPrivateKeyConnection(Ssl::PrivateKeyConnectionCallbacks& cb,
+                               Event::Dispatcher& dispatcher, bssl::UniquePtr<EVP_PKEY> pkey,
+                               CryptoMbQueue& queue);
+  virtual ~CryptoMbPrivateKeyConnection() = default;
+
+  // Returns a new reference to the shared private key.
+  bssl::UniquePtr<EVP_PKEY> getPrivateKey() { return bssl::UpRef(pkey_); };
+  void logDebugMsg(std::string msg) { ENVOY_LOG(debug, "CryptoMb: {}", msg); }
+  void logWarnMsg(std::string msg) { ENVOY_LOG(warn, "CryptoMb: {}", msg); }
+  void addToQueue(CryptoMbContextSharedPtr mb_ctx);
+
+  CryptoMbQueue& queue_;
+  Event::Dispatcher& dispatcher_;
+  Ssl::PrivateKeyConnectionCallbacks& cb_;
+  CryptoMbContextSharedPtr mb_ctx_{};
+
+private:
+  Event::FileEventPtr ssl_async_event_{};
+  bssl::UniquePtr<EVP_PKEY> pkey_;
+};
+
+// CryptoMbPrivateKeyMethodProvider handles the private key method operations for
+// an SSL socket.
+class CryptoMbPrivateKeyMethodProvider : public virtual Ssl::PrivateKeyMethodProvider,
+                                         public Logger::Loggable<Logger::Id::connection> {
+public:
+  CryptoMbPrivateKeyMethodProvider(
+      const envoy::extensions::private_key_providers::cryptomb::v3alpha::
+          CryptoMbPrivateKeyMethodConfig& config,
+      Server::Configuration::TransportSocketFactoryContext& private_key_provider_context,
+      IppCryptoSharedPtr ipp);
+
+  // Ssl::PrivateKeyMethodProvider
+  void registerPrivateKeyMethod(SSL* ssl, Ssl::PrivateKeyConnectionCallbacks& cb,
+                                Event::Dispatcher& dispatcher) override;
+  void unregisterPrivateKeyMethod(SSL* ssl) override;
+  bool checkFips() override;
+  Ssl::BoringSslPrivateKeyMethodSharedPtr getBoringSslPrivateKeyMethod() override;
+
+  // SSL ex-data index where the per-connection state is stored.
+  static int connectionIndex();
+
+private:
+  // Thread local data containing a single queue per worker thread.
+  struct ThreadLocalData : public ThreadLocal::ThreadLocalObject {
+    ThreadLocalData(std::chrono::milliseconds poll_delay, enum KeyType type, int keysize,
+                    IppCryptoSharedPtr ipp, Event::Dispatcher& d)
+        : queue_(poll_delay, type, keysize, ipp, d){};
+    CryptoMbQueue queue_;
+  };
+
+  Ssl::BoringSslPrivateKeyMethodSharedPtr method_{};
+  Api::Api& api_;
+  bssl::UniquePtr<EVP_PKEY> pkey_;
+  enum KeyType key_type_;
+
+  ThreadLocal::TypedSlotPtr<ThreadLocalData> tls_;
+};
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/source/ipp_crypto.h b/contrib/cryptomb/private_key_providers/source/ipp_crypto.h
new file mode 100644
index 0000000000000..d33d02270b4aa
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/ipp_crypto.h
@@ -0,0 +1,34 @@
+#pragma once
+
+#include "envoy/common/pure.h"
+
+#include "openssl/ssl.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+// Abstract interface over the multi-buffer crypto library calls used by the
+// provider; allows the implementation to be faked in tests.
+class IppCrypto {
+public:
+ virtual ~IppCrypto() = default;
+
+ virtual int mbxIsCryptoMbApplicable(uint64_t features) PURE;
+ virtual uint32_t mbxRsaPrivateCrtSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8],
+ const BIGNUM* const p_pa[8], const BIGNUM* const q_pa[8],
+ const BIGNUM* const dp_pa[8],
+ const BIGNUM* const dq_pa[8],
+ const BIGNUM* const iq_pa[8],
+ int expected_rsa_bitsize) PURE;
+ virtual uint32_t mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8],
+ const BIGNUM* const e_pa[8], const BIGNUM* const n_pa[8],
+ int expected_rsa_bitsize) PURE;
+ // Extracts the per-request status bit from a batched status word.
+ virtual bool mbxGetSts(uint32_t status, unsigned req_num) PURE;
+};
+
+using IppCryptoSharedPtr = std::shared_ptr<IppCrypto>;
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h b/contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h
new file mode 100644
index 0000000000000..e27576ead61af
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/source/ipp_crypto_impl.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include "contrib/cryptomb/private_key_providers/source/ipp_crypto.h"
+#include "crypto_mb/cpu_features.h"
+#include "crypto_mb/ec_nistp256.h"
+#include "crypto_mb/rsa.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+// Production IppCrypto implementation: thin pass-through to the ipp-crypto
+// `crypto_mb` C API.
+class IppCryptoImpl : public virtual IppCrypto {
+public:
+ int mbxIsCryptoMbApplicable(uint64_t features) override {
+ return ::mbx_is_crypto_mb_applicable(features);
+ }
+ uint32_t mbxRsaPrivateCrtSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8],
+ const BIGNUM* const p_pa[8], const BIGNUM* const q_pa[8],
+ const BIGNUM* const dp_pa[8], const BIGNUM* const dq_pa[8],
+ const BIGNUM* const iq_pa[8], int expected_rsa_bitsize) override {
+ return ::mbx_rsa_private_crt_ssl_mb8(from_pa, to_pa, p_pa, q_pa, dp_pa, dq_pa, iq_pa,
+ expected_rsa_bitsize);
+ }
+ uint32_t mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8],
+ const BIGNUM* const e_pa[8], const BIGNUM* const n_pa[8],
+ int expected_rsa_bitsize) override {
+ return ::mbx_rsa_public_ssl_mb8(from_pa, to_pa, e_pa, n_pa, expected_rsa_bitsize);
+ }
+ // Maps the library's per-lane status code to a simple success flag.
+ bool mbxGetSts(uint32_t status, unsigned req_num) override {
+ if (MBX_GET_STS(status, req_num) == MBX_STATUS_OK) {
+ return true;
+ }
+ return false;
+ };
+};
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/test/BUILD b/contrib/cryptomb/private_key_providers/test/BUILD
new file mode 100644
index 0000000000000..a48b639149f81
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/BUILD
@@ -0,0 +1,77 @@
+load(
+ "//bazel:envoy_build_system.bzl",
+ "envoy_cc_test",
+ "envoy_cc_test_library",
+ "envoy_contrib_package",
+)
+
+licenses(["notice"]) # Apache 2
+
+envoy_contrib_package()
+
+# Fake IppCrypto / factory implementations shared by the tests below, so the
+# tests can run without AVX-512 multi-buffer hardware support.
+envoy_cc_test_library(
+ name = "test_fake_factory",
+ srcs = [
+ "fake_factory.cc",
+ ],
+ hdrs = [
+ "fake_factory.h",
+ ],
+ external_deps = ["ssl"],
+ deps = [
+ "//contrib/cryptomb/private_key_providers/source:cryptomb_private_key_provider_lib",
+ "//contrib/cryptomb/private_key_providers/source:ipp_crypto_wrapper_lib",
+ "//envoy/api:api_interface",
+ "//envoy/event:dispatcher_interface",
+ "//envoy/server:transport_socket_config_interface",
+ "//envoy/ssl/private_key:private_key_config_interface",
+ "//envoy/ssl/private_key:private_key_interface",
+ "//source/common/config:datasource_lib",
+ "//source/common/config:utility_lib",
+ "//source/common/protobuf:utility_lib",
+ "@envoy_api//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg_cc_proto",
+ "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto",
+ ],
+)
+
+# Provider configuration parsing/validation tests.
+envoy_cc_test(
+ name = "config_test",
+ srcs = [
+ "config_test.cc",
+ ],
+ data = [
+ "//contrib/cryptomb/private_key_providers/test/test_data:certs",
+ ],
+ deps = [
+ ":test_fake_factory",
+ "//source/common/common:random_generator_lib",
+ "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib",
+ "//test/mocks/runtime:runtime_mocks",
+ "//test/mocks/server:server_mocks",
+ "//test/mocks/ssl:ssl_mocks",
+ "//test/mocks/stats:stats_mocks",
+ "//test/mocks/thread_local:thread_local_mocks",
+ "//test/test_common:environment_lib",
+ "//test/test_common:registry_lib",
+ "//test/test_common:simulated_time_system_lib",
+ "//test/test_common:utility_lib",
+ ],
+)
+
+# Sign/decrypt/complete operation tests.
+envoy_cc_test(
+ name = "ops_test",
+ srcs = [
+ "ops_test.cc",
+ ],
+ data = [
+ "//contrib/cryptomb/private_key_providers/test/test_data:certs",
+ ],
+ deps = [
+ ":test_fake_factory",
+ "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib",
+ "//test/mocks/stats:stats_mocks",
+ "//test/test_common:environment_lib",
+ "//test/test_common:simulated_time_system_lib",
+ "//test/test_common:utility_lib",
+ ],
+)
diff --git a/contrib/cryptomb/private_key_providers/test/config_test.cc b/contrib/cryptomb/private_key_providers/test/config_test.cc
new file mode 100644
index 0000000000000..365e1024ee4ad
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/config_test.cc
@@ -0,0 +1,274 @@
+#include <string>
+
+#include "source/common/common/random_generator.h"
+#include "source/extensions/transport_sockets/tls/private_key/private_key_manager_impl.h"
+
+#include "test/common/stats/stat_test_utility.h"
+#include "test/mocks/common.h"
+#include "test/mocks/server/transport_socket_factory_context.h"
+#include "test/mocks/ssl/mocks.h"
+#include "test/mocks/thread_local/mocks.h"
+#include "test/test_common/environment.h"
+#include "test/test_common/registry.h"
+#include "test/test_common/simulated_time_system.h"
+#include "test/test_common/utility.h"
+
+#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h"
+#include "fake_factory.h"
+#include "gtest/gtest.h"
+
+using testing::NiceMock;
+using testing::ReturnRef;
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider
+parsePrivateKeyProviderFromV3Yaml(const std::string& yaml_string) {
+ envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider private_key_provider;
+ TestUtility::loadFromYaml(TestEnvironment::substitute(yaml_string), private_key_provider);
+ return private_key_provider;
+}
+
+class CryptoMbConfigTest : public Event::TestUsingSimulatedTime, public testing::Test {
+public:
+ CryptoMbConfigTest() : api_(Api::createApiForTest(store_, time_system_)) {
+ ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_));
+ ON_CALL(factory_context_, threadLocal()).WillByDefault(ReturnRef(tls_));
+ ON_CALL(factory_context_, sslContextManager()).WillByDefault(ReturnRef(context_manager_));
+ ON_CALL(context_manager_, privateKeyMethodManager())
+ .WillByDefault(ReturnRef(private_key_method_manager_));
+ }
+
+ Ssl::PrivateKeyMethodProviderSharedPtr createWithConfig(std::string yaml,
+ bool supported_instruction_set = true) {
+ FakeCryptoMbPrivateKeyMethodFactory cryptomb_factory(supported_instruction_set);
+    Registry::InjectFactory<Ssl::PrivateKeyMethodProviderInstanceFactory>
+        cryptomb_private_key_method_factory(cryptomb_factory);
+
+ return factory_context_.sslContextManager()
+ .privateKeyMethodManager()
+ .createPrivateKeyMethodProvider(parsePrivateKeyProviderFromV3Yaml(yaml), factory_context_);
+ }
+
+ Event::SimulatedTimeSystem time_system_;
+  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context_;
+ Stats::IsolatedStoreImpl store_;
+ Api::ApiPtr api_;
+  NiceMock<ThreadLocal::MockInstance> tls_;
+  NiceMock<Ssl::MockContextManager> context_manager_;
+ TransportSockets::Tls::PrivateKeyMethodManagerImpl private_key_method_manager_;
+};
+
+TEST_F(CryptoMbConfigTest, CreateRsa1024) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem" }
+)EOF";
+
+ Ssl::PrivateKeyMethodProviderSharedPtr provider = createWithConfig(yaml);
+ EXPECT_NE(nullptr, provider);
+ EXPECT_EQ(false, provider->checkFips());
+ Ssl::BoringSslPrivateKeyMethodSharedPtr method = provider->getBoringSslPrivateKeyMethod();
+ EXPECT_NE(nullptr, method);
+
+ ssl_private_key_result_t res;
+
+ res = method->sign(nullptr, nullptr, nullptr, 0, 0, nullptr, 0);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = method->decrypt(nullptr, nullptr, nullptr, 0, nullptr, 0);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = method->complete(nullptr, nullptr, nullptr, 0);
+ EXPECT_EQ(res, ssl_private_key_failure);
+}
+
+TEST_F(CryptoMbConfigTest, CreateRsa2048) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem" }
+)EOF";
+
+ EXPECT_NE(nullptr, createWithConfig(yaml));
+}
+
+TEST_F(CryptoMbConfigTest, CreateRsa2048WithExponent3) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem" }
+)EOF";
+
+ EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException,
+ "Only RSA keys with \"e\" parameter value 65537 are allowed, because "
+ "we can validate the signatures using multi-buffer instructions.");
+}
+
+TEST_F(CryptoMbConfigTest, CreateRsa3072) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem" }
+)EOF";
+
+ EXPECT_NE(nullptr, createWithConfig(yaml));
+}
+
+TEST_F(CryptoMbConfigTest, CreateRsa4096) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" }
+)EOF";
+
+ EXPECT_NE(nullptr, createWithConfig(yaml));
+}
+
+TEST_F(CryptoMbConfigTest, CreateRsa512) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem" }
+)EOF";
+
+ EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException,
+ "Only RSA keys of 1024, 2048, 3072, and 4096 bits are supported.");
+}
+
+TEST_F(CryptoMbConfigTest, CreateEcdsaP256) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem" }
+)EOF";
+
+ Ssl::PrivateKeyMethodProviderSharedPtr provider = createWithConfig(yaml);
+ EXPECT_NE(nullptr, provider);
+ EXPECT_EQ(false, provider->checkFips());
+ Ssl::BoringSslPrivateKeyMethodSharedPtr method = provider->getBoringSslPrivateKeyMethod();
+ EXPECT_NE(nullptr, method);
+
+ ssl_private_key_result_t res;
+
+ res = method->sign(nullptr, nullptr, nullptr, 0, 0, nullptr, 0);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = method->decrypt(nullptr, nullptr, nullptr, 0, nullptr, 0);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = method->complete(nullptr, nullptr, nullptr, 0);
+ EXPECT_EQ(res, ssl_private_key_failure);
+}
+
+TEST_F(CryptoMbConfigTest, CreateEcdsaP256Inline) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key:
+ inline_string: |
+ -----BEGIN PRIVATE KEY-----
+ MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgIxp5QZ3YFaT8s+CR
+ rqUqeYSe5D9APgBZbyCvAkO2/JChRANCAARM53DFLHORcSyBpu5zpaG7/HfLXT8H
+ r1RaoGEiH9pi3MIKg1H+b8EaM1M4wURT2yXMjuvogQ6ixs0B1mvRkZnL
+ -----END PRIVATE KEY-----
+)EOF";
+
+ EXPECT_NE(nullptr, createWithConfig(yaml));
+}
+
+TEST_F(CryptoMbConfigTest, CreateEcdsaP384) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem" }
+)EOF";
+
+ EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException,
+ "Only P-256 ECDSA keys are supported.");
+}
+
+TEST_F(CryptoMbConfigTest, CreateMissingPrivateKey) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/missing.pem" }
+)EOF";
+
+ EXPECT_THROW(createWithConfig(yaml), EnvoyException);
+}
+
+TEST_F(CryptoMbConfigTest, CreateMissingKey) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0.02s
+ )EOF";
+
+ EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml), EnvoyException,
+ "Unexpected DataSource::specifier_case(): 0");
+}
+
+TEST_F(CryptoMbConfigTest, CreateMissingPollDelay) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" }
+ )EOF";
+
+ EXPECT_THROW_WITH_REGEX(createWithConfig(yaml), EnvoyException,
+ "Proto constraint validation failed");
+}
+
+TEST_F(CryptoMbConfigTest, CreateZeroPollDelay) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ poll_delay: 0s
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" }
+ )EOF";
+
+ EXPECT_THROW_WITH_REGEX(createWithConfig(yaml), EnvoyException,
+ "Proto constraint validation failed");
+}
+
+TEST_F(CryptoMbConfigTest, CreateNotSupportedInstructionSet) {
+ const std::string yaml = R"EOF(
+ provider_name: cryptomb
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.private_key_providers.cryptomb.v3alpha.CryptoMbPrivateKeyMethodConfig
+ private_key: { "filename": "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem" }
+ poll_delay: 0.02s
+ )EOF";
+
+ EXPECT_THROW_WITH_MESSAGE(createWithConfig(yaml, false), EnvoyException,
+ "Multi-buffer CPU instructions not available.");
+}
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/test/fake_factory.cc b/contrib/cryptomb/private_key_providers/test/fake_factory.cc
new file mode 100644
index 0000000000000..1fddc745f732f
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/fake_factory.cc
@@ -0,0 +1,171 @@
+#include "fake_factory.h"
+
+#include <memory>
+
+#include "envoy/registry/registry.h"
+#include "envoy/server/transport_socket_config.h"
+
+#include "source/common/config/datasource.h"
+#include "source/common/config/utility.h"
+#include "source/common/protobuf/message_validator_impl.h"
+#include "source/common/protobuf/utility.h"
+
+#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h"
+#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.h"
+#include "contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.pb.validate.h"
+#include "openssl/rsa.h"
+#include "openssl/ssl.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+FakeIppCryptoImpl::FakeIppCryptoImpl(bool supported_instruction_set)
+ : supported_instruction_set_(supported_instruction_set) {}
+
+FakeIppCryptoImpl::~FakeIppCryptoImpl() {
+ BN_free(n_);
+ BN_free(e_);
+ BN_free(d_);
+}
+
+int FakeIppCryptoImpl::mbxIsCryptoMbApplicable(uint64_t) {
+ return supported_instruction_set_ ? 1 : 0;
+}
+
+uint32_t FakeIppCryptoImpl::mbxSetSts(uint32_t status, unsigned req_num, bool success) {
+ if (success) {
+ // clear bit req_num
+ return status & ~(1UL << req_num);
+ }
+ // set bit req_num
+ return status | (1UL << req_num);
+}
+
+bool FakeIppCryptoImpl::mbxGetSts(uint32_t status, unsigned req_num) {
+ // return true if bit req_num if not set
+ return !((status >> req_num) & 1UL);
+}
+
+uint32_t FakeIppCryptoImpl::mbxRsaPrivateCrtSslMb8(
+ const uint8_t* const from_pa[8], uint8_t* const to_pa[8], const BIGNUM* const p_pa[8],
+ const BIGNUM* const q_pa[8], const BIGNUM* const dp_pa[8], const BIGNUM* const dq_pa[8],
+ const BIGNUM* const iq_pa[8], int expected_rsa_bitsize) {
+
+ uint32_t status = 0xff;
+
+ for (int i = 0; i < 8; i++) {
+ RSA* rsa;
+ size_t out_len = 0;
+ int ret;
+
+ if (from_pa[i] == nullptr) {
+ break;
+ }
+
+ rsa = RSA_new();
+
+ RSA_set0_factors(rsa, BN_dup(p_pa[i]), BN_dup(q_pa[i]));
+ RSA_set0_crt_params(rsa, BN_dup(dp_pa[i]), BN_dup(dq_pa[i]), BN_dup(iq_pa[i]));
+
+ // The real `mbx_rsa_private_crt_ssl_mb8` doesn't require these parameters to
+ // be set, but BoringSSL does. That's why they are provided out-of-band in
+ // the factory initialization.
+ RSA_set0_key(rsa, BN_dup(n_), BN_dup(e_), BN_dup(d_));
+
+ // From the docs: "Memory buffers of the plain- and `ciphertext` must be `ceil(rsaBitlen/8)`
+ // bytes length."
+ ret = RSA_sign_raw(rsa, &out_len, to_pa[i], expected_rsa_bitsize / 8, from_pa[i],
+ expected_rsa_bitsize / 8, RSA_NO_PADDING);
+
+ RSA_free(rsa);
+
+ status = mbxSetSts(status, i, inject_errors_ ? !ret : ret);
+ }
+
+ UNREFERENCED_PARAMETER(expected_rsa_bitsize);
+
+ return status;
+}
+
+uint32_t FakeIppCryptoImpl::mbxRsaPublicSslMb8(const uint8_t* const from_pa[8],
+ uint8_t* const to_pa[8], const BIGNUM* const e_pa[8],
+ const BIGNUM* const n_pa[8],
+ int expected_rsa_bitsize) {
+ uint32_t status = 0xff;
+
+ for (int i = 0; i < 8; i++) {
+ RSA* rsa;
+ size_t out_len = 0;
+ int ret;
+
+ if (e_pa[i] == nullptr) {
+ break;
+ }
+
+ rsa = RSA_new();
+
+ RSA_set0_key(rsa, BN_dup(n_pa[i]), BN_dup(e_pa[i]), BN_dup(d_));
+
+ ret = RSA_verify_raw(rsa, &out_len, to_pa[i], expected_rsa_bitsize / 8, from_pa[i],
+ expected_rsa_bitsize / 8, RSA_NO_PADDING);
+
+ RSA_free(rsa);
+
+ status = mbxSetSts(status, i, inject_errors_ ? !ret : ret);
+ }
+
+ UNREFERENCED_PARAMETER(expected_rsa_bitsize);
+
+ return status;
+}
+
+FakeCryptoMbPrivateKeyMethodFactory::FakeCryptoMbPrivateKeyMethodFactory(
+ bool supported_instruction_set)
+ : supported_instruction_set_(supported_instruction_set) {}
+
+Ssl::PrivateKeyMethodProviderSharedPtr
+FakeCryptoMbPrivateKeyMethodFactory::createPrivateKeyMethodProviderInstance(
+ const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& proto_config,
+ Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) {
+  ProtobufTypes::MessagePtr message = std::make_unique<
+      envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig>();
+
+ Config::Utility::translateOpaqueConfig(proto_config.typed_config(),
+ ProtobufMessage::getNullValidationVisitor(), *message);
+ const envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig
+ conf =
+          MessageUtil::downcastAndValidate<const envoy::extensions::private_key_providers::cryptomb::v3alpha::CryptoMbPrivateKeyMethodConfig&>(
+ *message, private_key_provider_context.messageValidationVisitor());
+
+  std::shared_ptr<FakeIppCryptoImpl> fakeIpp =
+      std::make_shared<FakeIppCryptoImpl>(supported_instruction_set_);
+
+ // We need to get more RSA key params in order to be able to use BoringSSL signing functions.
+ std::string private_key =
+ Config::DataSource::read(conf.private_key(), false, private_key_provider_context.api());
+
+  bssl::UniquePtr<BIO> bio(
+      BIO_new_mem_buf(const_cast<char*>(private_key.data()), private_key.size()));
+
+  bssl::UniquePtr<EVP_PKEY> pkey(PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr, nullptr));
+ if (pkey != nullptr && EVP_PKEY_id(pkey.get()) == EVP_PKEY_RSA) {
+ const BIGNUM *e, *n, *d;
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
+ RSA_get0_key(rsa, &n, &e, &d);
+ fakeIpp->setRsaKey(n, e, d);
+ }
+
+  IppCryptoSharedPtr ipp = std::dynamic_pointer_cast<IppCrypto>(fakeIpp);
+
+  return std::make_shared<CryptoMbPrivateKeyMethodProvider>(conf, private_key_provider_context,
+                                                            ipp);
+}
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/test/fake_factory.h b/contrib/cryptomb/private_key_providers/test/fake_factory.h
new file mode 100644
index 0000000000000..46fa5d3049e24
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/fake_factory.h
@@ -0,0 +1,65 @@
+#pragma once
+
+#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h"
+#include "envoy/ssl/private_key/private_key.h"
+#include "envoy/ssl/private_key/private_key_config.h"
+
+#include "contrib/cryptomb/private_key_providers/source/ipp_crypto.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+class FakeIppCryptoImpl : public virtual IppCrypto {
+public:
+ FakeIppCryptoImpl(bool supported_instruction_set);
+ ~FakeIppCryptoImpl() override;
+
+ int mbxIsCryptoMbApplicable(uint64_t features) override;
+ uint32_t mbxRsaPrivateCrtSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8],
+ const BIGNUM* const p_pa[8], const BIGNUM* const q_pa[8],
+ const BIGNUM* const dp_pa[8], const BIGNUM* const dq_pa[8],
+ const BIGNUM* const iq_pa[8], int expected_rsa_bitsize) override;
+ uint32_t mbxRsaPublicSslMb8(const uint8_t* const from_pa[8], uint8_t* const to_pa[8],
+ const BIGNUM* const e_pa[8], const BIGNUM* const n_pa[8],
+ int expected_rsa_bitsize) override;
+ bool mbxGetSts(uint32_t status, unsigned req_num) override;
+
+ void setRsaKey(const BIGNUM* n, const BIGNUM* e, const BIGNUM* d) {
+ n_ = BN_dup(n);
+ e_ = BN_dup(e);
+ d_ = BN_dup(d);
+ };
+
+ void injectErrors(bool enabled) { inject_errors_ = enabled; }
+
+private:
+ uint32_t mbxSetSts(uint32_t status, unsigned req_num, bool success);
+
+ bool supported_instruction_set_;
+ BIGNUM* n_{};
+ BIGNUM* e_{};
+ BIGNUM* d_{};
+
+ bool inject_errors_{};
+};
+
+class FakeCryptoMbPrivateKeyMethodFactory : public Ssl::PrivateKeyMethodProviderInstanceFactory {
+public:
+ FakeCryptoMbPrivateKeyMethodFactory(bool supported_instruction_set);
+
+ // Ssl::PrivateKeyMethodProviderInstanceFactory
+ Ssl::PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProviderInstance(
+ const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& message,
+ Server::Configuration::TransportSocketFactoryContext& private_key_provider_context) override;
+ std::string name() const override { return "cryptomb"; };
+
+private:
+ bool supported_instruction_set_;
+};
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/test/ops_test.cc b/contrib/cryptomb/private_key_providers/test/ops_test.cc
new file mode 100644
index 0000000000000..0756d5869a8fe
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/ops_test.cc
@@ -0,0 +1,436 @@
+#include <memory>
+
+#include "source/extensions/transport_sockets/tls/private_key/private_key_manager_impl.h"
+
+#include "test/common/stats/stat_test_utility.h"
+#include "test/test_common/environment.h"
+#include "test/test_common/simulated_time_system.h"
+#include "test/test_common/utility.h"
+
+#include "contrib/cryptomb/private_key_providers/source/cryptomb_private_key_provider.h"
+#include "fake_factory.h"
+#include "gtest/gtest.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace PrivateKeyMethodProvider {
+namespace CryptoMb {
+
+class TestCallbacks : public Envoy::Ssl::PrivateKeyConnectionCallbacks {
+public:
+ void onPrivateKeyMethodComplete() override{
+
+ };
+};
+
+// Testing interface
+ssl_private_key_result_t privateKeyCompleteForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out);
+ssl_private_key_result_t ecdsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out,
+ uint16_t signature_algorithm, const uint8_t* in,
+ size_t in_len);
+ssl_private_key_result_t rsaPrivateKeySignForTest(CryptoMbPrivateKeyConnection* ops, uint8_t* out,
+ size_t* out_len, size_t max_out,
+ uint16_t signature_algorithm, const uint8_t* in,
+ size_t in_len);
+ssl_private_key_result_t rsaPrivateKeyDecryptForTest(CryptoMbPrivateKeyConnection* ops,
+ uint8_t* out, size_t* out_len, size_t max_out,
+ const uint8_t* in, size_t in_len);
+
+bssl::UniquePtr<EVP_PKEY> makeRsaKey() {
+ std::string file = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(
+ "{{ test_rundir }}/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem"));
+  bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(file.data(), file.size()));
+
+  bssl::UniquePtr<EVP_PKEY> key(EVP_PKEY_new());
+
+ RSA* rsa = PEM_read_bio_RSAPrivateKey(bio.get(), nullptr, nullptr, nullptr);
+ RELEASE_ASSERT(rsa != nullptr, "PEM_read_bio_RSAPrivateKey failed.");
+ RELEASE_ASSERT(1 == EVP_PKEY_assign_RSA(key.get(), rsa), "EVP_PKEY_assign_RSA failed.");
+ return key;
+}
+
+bssl::UniquePtr<EVP_PKEY> makeEcdsaKey() {
+ std::string file = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(
+ "{{ test_rundir "
+ "}}/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem"));
+  bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(file.data(), file.size()));
+
+  bssl::UniquePtr<EVP_PKEY> key(EVP_PKEY_new());
+
+ EC_KEY* ec = PEM_read_bio_ECPrivateKey(bio.get(), nullptr, nullptr, nullptr);
+
+ RELEASE_ASSERT(ec != nullptr, "PEM_read_bio_ECPrivateKey failed.");
+ RELEASE_ASSERT(1 == EVP_PKEY_assign_EC_KEY(key.get(), ec), "EVP_PKEY_assign_EC_KEY failed.");
+ return key;
+}
+
+TEST(CryptoMbProviderTest, TestEcdsaSigning) {
+ Event::SimulatedTimeSystem time_system;
+ Stats::TestUtil::TestStore server_stats_store;
+ Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system);
+ Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread"));
+  bssl::UniquePtr<EVP_PKEY> pkey = makeEcdsaKey();
+  std::shared_ptr<FakeIppCryptoImpl> fakeIpp = std::make_shared<FakeIppCryptoImpl>(true);
+
+ CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Ec, 256, fakeIpp, *dispatcher);
+
+ size_t in_len = 32;
+ uint8_t in[32] = {0x7f};
+ size_t out_len = 0;
+ uint8_t out[128] = {0};
+
+ ssl_private_key_result_t res;
+ TestCallbacks cbs;
+
+ // First request
+ CryptoMbPrivateKeyConnection op(cbs, *dispatcher, bssl::UpRef(pkey), queue);
+ res = ecdsaPrivateKeySignForTest(&op, out, &out_len, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256, in,
+ in_len);
+ EXPECT_EQ(res, ssl_private_key_success);
+}
+
+TEST(CryptoMbProviderTest, TestRsaPkcs1Signing) {
+ Event::SimulatedTimeSystem time_system;
+ Stats::TestUtil::TestStore server_stats_store;
+ Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system);
+ Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread"));
+  bssl::UniquePtr<EVP_PKEY> pkey = makeRsaKey();
+  std::shared_ptr<FakeIppCryptoImpl> fakeIpp = std::make_shared<FakeIppCryptoImpl>(true);
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
+ const BIGNUM *e, *n, *d;
+ RSA_get0_key(rsa, &n, &e, &d);
+ fakeIpp->setRsaKey(n, e, d);
+
+ CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher);
+
+ size_t in_len = 32;
+ uint8_t in[32] = {0x7f};
+
+ ssl_private_key_result_t res;
+ TestCallbacks cbs[8];
+
+ // First request
+ CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue);
+ res =
+ rsaPrivateKeySignForTest(&op0, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128);
+ // No processing done yet after first request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ // Second request
+ CryptoMbPrivateKeyConnection op1(cbs[1], *dispatcher, bssl::UpRef(pkey), queue);
+ res =
+ rsaPrivateKeySignForTest(&op1, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128);
+ // No processing done yet after second request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ // Six more requests
+ CryptoMbPrivateKeyConnection op2(cbs[2], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op3(cbs[3], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op4(cbs[4], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op5(cbs[5], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op6(cbs[6], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op7(cbs[7], *dispatcher, bssl::UpRef(pkey), queue);
+ res =
+ rsaPrivateKeySignForTest(&op2, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res =
+ rsaPrivateKeySignForTest(&op3, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res =
+ rsaPrivateKeySignForTest(&op4, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res =
+ rsaPrivateKeySignForTest(&op5, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res =
+ rsaPrivateKeySignForTest(&op6, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res =
+ rsaPrivateKeySignForTest(&op7, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ size_t out_len = 0;
+ uint8_t out[128] = {0};
+
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ // Since the status is set only from the event loop (which is not run) this should be still
+ // "retry". The cryptographic result is present anyway.
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ op0.mb_ctx_->setStatus(RequestStatus::Success);
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ EXPECT_EQ(res, ssl_private_key_success);
+ EXPECT_NE(out_len, 0);
+}
+
+TEST(CryptoMbProviderTest, TestRsaPssSigning) {
+ Event::SimulatedTimeSystem time_system;
+ Stats::TestUtil::TestStore server_stats_store;
+ Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system);
+ Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread"));
+  bssl::UniquePtr<EVP_PKEY> pkey = makeRsaKey();
+  std::shared_ptr<FakeIppCryptoImpl> fakeIpp = std::make_shared<FakeIppCryptoImpl>(true);
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
+ const BIGNUM *e, *n, *d;
+ RSA_get0_key(rsa, &n, &e, &d);
+ fakeIpp->setRsaKey(n, e, d);
+
+ CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher);
+
+ size_t in_len = 32;
+ uint8_t in[32] = {0x7f};
+
+ ssl_private_key_result_t res;
+ TestCallbacks cbs[8];
+
+ // First request
+ CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue);
+ res = rsaPrivateKeySignForTest(&op0, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128);
+ // No processing done yet after first request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ // Second request
+ CryptoMbPrivateKeyConnection op1(cbs[1], *dispatcher, bssl::UpRef(pkey), queue);
+ res = rsaPrivateKeySignForTest(&op1, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128);
+ // No processing done yet after second request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ // Six more requests
+ CryptoMbPrivateKeyConnection op2(cbs[2], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op3(cbs[3], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op4(cbs[4], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op5(cbs[5], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op6(cbs[6], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op7(cbs[7], *dispatcher, bssl::UpRef(pkey), queue);
+ res = rsaPrivateKeySignForTest(&op2, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeySignForTest(&op3, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeySignForTest(&op4, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeySignForTest(&op5, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeySignForTest(&op6, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeySignForTest(&op7, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ size_t out_len = 0;
+ uint8_t out[128] = {0};
+
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ // Since the status is set only from the event loop (which is not run) this should be still
+ // "retry". The cryptographic result is present anyway.
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ op0.mb_ctx_->setStatus(RequestStatus::Success);
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ EXPECT_EQ(res, ssl_private_key_success);
+ EXPECT_NE(out_len, 0);
+}
+
+TEST(CryptoMbProviderTest, TestRsaDecrypt) {
+ Event::SimulatedTimeSystem time_system;
+ Stats::TestUtil::TestStore server_stats_store;
+ Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system);
+ Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread"));
+  bssl::UniquePtr<EVP_PKEY> pkey = makeRsaKey();
+  std::shared_ptr<FakeIppCryptoImpl> fakeIpp = std::make_shared<FakeIppCryptoImpl>(true);
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
+ const BIGNUM *e, *n, *d;
+ RSA_get0_key(rsa, &n, &e, &d);
+ fakeIpp->setRsaKey(n, e, d);
+
+ CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher);
+
+ size_t in_len = 32;
+ uint8_t in[32] = {0x7f};
+
+ ssl_private_key_result_t res;
+ TestCallbacks cbs[8];
+
+ // First request
+ CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue);
+ res = rsaPrivateKeyDecryptForTest(&op0, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128);
+ // No processing done yet after first request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ // Second request
+ CryptoMbPrivateKeyConnection op1(cbs[1], *dispatcher, bssl::UpRef(pkey), queue);
+ res = rsaPrivateKeyDecryptForTest(&op1, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128);
+ // No processing done yet after second request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ // Six more requests
+ CryptoMbPrivateKeyConnection op2(cbs[2], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op3(cbs[3], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op4(cbs[4], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op5(cbs[5], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op6(cbs[6], *dispatcher, bssl::UpRef(pkey), queue);
+ CryptoMbPrivateKeyConnection op7(cbs[7], *dispatcher, bssl::UpRef(pkey), queue);
+ res = rsaPrivateKeyDecryptForTest(&op2, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeyDecryptForTest(&op3, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeyDecryptForTest(&op4, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeyDecryptForTest(&op5, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeyDecryptForTest(&op6, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+ res = rsaPrivateKeyDecryptForTest(&op7, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ size_t out_len = 0;
+ uint8_t out[128] = {0};
+
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ // Since the status is set only from the event loop (which is not run) this should be still
+ // "retry". The cryptographic result is present anyway.
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ op0.mb_ctx_->setStatus(RequestStatus::Success);
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ EXPECT_EQ(res, ssl_private_key_success);
+ EXPECT_NE(out_len, 0);
+}
+
+TEST(CryptoMbProviderTest, TestErrors) {
+ Event::SimulatedTimeSystem time_system;
+ Stats::TestUtil::TestStore server_stats_store;
+ Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system);
+ Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread"));
+ bssl::UniquePtr pkey = makeEcdsaKey();
+ bssl::UniquePtr rsa_pkey = makeRsaKey();
+ std::shared_ptr fakeIpp = std::make_shared(true);
+
+ CryptoMbQueue ec_queue(std::chrono::milliseconds(200), KeyType::Ec, 256, fakeIpp, *dispatcher);
+ CryptoMbQueue rsa_queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher);
+
+ size_t in_len = 32;
+ uint8_t in[32] = {0x7f};
+
+ ssl_private_key_result_t res;
+ TestCallbacks cb;
+
+ CryptoMbPrivateKeyConnection op_ec(cb, *dispatcher, bssl::UpRef(pkey), ec_queue);
+ CryptoMbPrivateKeyConnection op_rsa(cb, *dispatcher, bssl::UpRef(rsa_pkey), rsa_queue);
+
+ // no operation defined
+ res = ecdsaPrivateKeySignForTest(nullptr, nullptr, nullptr, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256,
+ in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res =
+ rsaPrivateKeySignForTest(nullptr, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = rsaPrivateKeyDecryptForTest(nullptr, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+
+ // Unknown signature algorithm
+ res = ecdsaPrivateKeySignForTest(&op_ec, nullptr, nullptr, 128, 1234, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = rsaPrivateKeySignForTest(&op_rsa, nullptr, nullptr, 128, 1234, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+
+ // Wrong signature algorithm
+ res = ecdsaPrivateKeySignForTest(&op_ec, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in,
+ in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = rsaPrivateKeySignForTest(&op_rsa, nullptr, nullptr, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256,
+ in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+
+ // Wrong operation type
+ res = ecdsaPrivateKeySignForTest(&op_rsa, nullptr, nullptr, 128, SSL_SIGN_ECDSA_SECP256R1_SHA256,
+ in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res =
+ rsaPrivateKeySignForTest(&op_ec, nullptr, nullptr, 128, SSL_SIGN_RSA_PSS_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+ res = rsaPrivateKeyDecryptForTest(&op_ec, nullptr, nullptr, 128, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_failure);
+}
+
+TEST(CryptoMbProviderTest, TestRSATimer) {
+ Event::SimulatedTimeSystem time_system;
+ Stats::TestUtil::TestStore server_stats_store;
+ Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system);
+ Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread"));
+ bssl::UniquePtr pkey = makeRsaKey();
+ std::shared_ptr fakeIpp = std::make_shared(true);
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
+ const BIGNUM *e, *n, *d;
+ RSA_get0_key(rsa, &n, &e, &d);
+ fakeIpp->setRsaKey(n, e, d);
+
+ CryptoMbQueue queue(std::chrono::milliseconds(200), KeyType::Rsa, 1024, fakeIpp, *dispatcher);
+
+ size_t in_len = 32;
+ uint8_t in[32] = {0x7f};
+
+ ssl_private_key_result_t res;
+ TestCallbacks cbs[8];
+
+ // First request
+ CryptoMbPrivateKeyConnection op0(cbs[0], *dispatcher, bssl::UpRef(pkey), queue);
+ res =
+ rsaPrivateKeySignForTest(&op0, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op0, nullptr, nullptr, 128);
+ // No processing done yet after first request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ time_system.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher,
+ Event::Dispatcher::RunType::NonBlock);
+
+ size_t out_len = 0;
+ uint8_t out[128] = {0};
+
+ res = privateKeyCompleteForTest(&op0, out, &out_len, 128);
+ EXPECT_EQ(res, ssl_private_key_success);
+ EXPECT_NE(out_len, 0);
+
+ // Add crypto library errors
+ fakeIpp->injectErrors(true);
+
+ CryptoMbPrivateKeyConnection op1(cbs[0], *dispatcher, bssl::UpRef(pkey), queue);
+ res =
+ rsaPrivateKeySignForTest(&op1, nullptr, nullptr, 128, SSL_SIGN_RSA_PKCS1_SHA256, in, in_len);
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ res = privateKeyCompleteForTest(&op1, nullptr, nullptr, 128);
+ // No processing done yet after first request
+ EXPECT_EQ(res, ssl_private_key_retry);
+
+ time_system.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher,
+ Event::Dispatcher::RunType::NonBlock);
+
+ res = privateKeyCompleteForTest(&op1, out, &out_len, 128);
+ EXPECT_EQ(res, ssl_private_key_failure);
+}
+
+} // namespace CryptoMb
+} // namespace PrivateKeyMethodProvider
+} // namespace Extensions
+} // namespace Envoy
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/BUILD b/contrib/cryptomb/private_key_providers/test/test_data/BUILD
new file mode 100644
index 0000000000000..f55a73857b846
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/BUILD
@@ -0,0 +1,13 @@
+load(
+ "//bazel:envoy_build_system.bzl",
+ "envoy_contrib_package",
+)
+
+licenses(["notice"]) # Apache 2
+
+envoy_contrib_package()
+
+filegroup(
+ name = "certs",
+ srcs = glob(["*.pem"]),
+)
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem
new file mode 100644
index 0000000000000..60d9e4c83180e
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p256.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIMpJw5U66K+DcA963b+/jZYrMrZDjaB0khHSwZte3vYCoAoGCCqGSM49
+AwEHoUQDQgAELp3XvBfkVWQBOKo3ttAaJ6SUaUb8uKqCS504WXHWMO4h89F+nYtC
+Ecgl8EiLXXyc86tawKjGdizcCjrKMiFo3A==
+-----END EC PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem
new file mode 100644
index 0000000000000..9bf5ffe14bb33
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/ecdsa-p384.pem
@@ -0,0 +1,6 @@
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBE7nkbGPIsiG0S1vajwJkfVtlZM7+KhrN9LSqolHsNAv/t4kGA0Sn3
+McnWqcts9RugBwYFK4EEACKhZANiAAT2tXd7DLnmD9JL+YNYH4+RgBgQSD5DnP90
+Xu8uuOUZwO3ZLdzuf+TRs0MneULXS3fWqBCYo7gNPRdZR40QrT/4dQGpQsDAFl3f
+Yg9Un5cxR+XovaseGsnMQoP80majEYA=
+-----END EC PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/generate-keys.sh b/contrib/cryptomb/private_key_providers/test/test_data/generate-keys.sh
new file mode 100755
index 0000000000000..95b1e59faf379
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/generate-keys.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+
+openssl ecparam -name prime256v1 -genkey -noout -out ecdsa-p256.pem
+openssl ecparam -name secp384r1 -genkey -noout -out ecdsa-p384.pem
+openssl genrsa -out rsa-512.pem 512
+openssl genrsa -out rsa-1024.pem 1024
+openssl genrsa -out rsa-2048.pem 2048
+openssl genrsa -3 -out rsa-2048-exponent-3.pem 2048
+openssl genrsa -out rsa-3072.pem 3072
+openssl genrsa -out rsa-4096.pem 4096
+
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem
new file mode 100644
index 0000000000000..38f23d45a246e
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-1024.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXwIBAAKBgQDX/SburqyA+DDb1Kz24bC1QPqXAZKKk5eUX6kXyU4Xg/532gvb
+eX9ie2S/NdQwH9UU4jO2pTdph7R7BJZlqTxRjrIre83mIeZPlsUh4T7LACE19T0p
+xvxxjZphRYUNcz+06KRaT91IdscCJkVCHFPD9DquVMyUwp5pjfFEw6eRYQIDAQAB
+AoGBAM2FlLs/uOPLxuoXWDJflT8Twp9YHrIAORc1Y3g/1DAqKESxVeEUnnL+iWIs
+/WiBkceaaqzcT3r6Z3E7b0TzIXGVctJhOomclH6+NNOtC63WWXh6IFX/9YBDszUS
+kRsIvOiqDqqAm6VVYpSArJvXFHMMLKvFtqu+AFW8zFkjkrhlAkEA+ZVunIWERDWL
+KMxX/BZCkeIH3Rv/TczRrj7WqWPcIIPsBOsvqfL/xI3peagFs3TeLU/o16caWoLS
+M7TltvTqtwJBAN2Kn+XiVm7GB9ITwoTNH95n6GL+3fX0jgwUvsygfo/UlhWoIMUh
+Rb2Ic/72cDasBrnxIWjKlahqEOJh0YvNLKcCQQCYKP1VmaTovMgJaINfoeaV7/qh
+V9dPhEZ1d8QBY2spu6Ph38ygTRCXsXkc/U30eZSWhXhMOYk8kzM56Nh/sVODAkEA
+gAMzI9WmVfnt2PD8DFqu2Ie4G0PkI9P9JHP0UC9JEnknhDoTPXVdZAht5lymOKEs
+fdMcl/2/foJTYUxeleanrwJBAKpgVmeC6Z8aUiDCJOEgfr9KvvWf77S2+PmgZwFx
+lSK/Hz/MDhTHJlSjOin3gpl82VL5cmZywClkWIh1wE8PCvg=
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem
new file mode 100644
index 0000000000000..87786c8c63f74
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048-exponent-3.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAwOQVkfs063s0I/wq/cBW63pX+6OxMxLXohNdBHCITbnwnI1B
+xkEA6FvNWN+xRH2AW/8aiyyYSy38BErJwgOAbA1e9b6knYfZb5IIMVmnx519R7CK
+TdYrZMnHbyvN3U2xULJZ+y+gPCnRihZSrL/hXlYHkyt/ef5OSxshyrjxN9RmxBLs
+zTus5G+04mHN2RlmnjOALOjXApeHD9YGLddODqp89TUV+LpddI+s+0KFaRUWMxne
+pegA+awoGSk4OGwHFbkMz5eIH9CZYCL3d6nt3Q9R48t4wsDGTJaNaAeg5Bv+6+pO
+MuP9QteNl3d3ro/8KZOe7vvXDuAy/86tTjbYswIBAwKCAQEAgJgOYVIjR6d4F/1x
+/oA58lGP/RfLd2HlFreTWEsFiSagaF4r2YCrRZKI5ep2LakAPVS8XMhlh3P9WDHb
+1q0ASAjp+SnDE6/mSmFay5EahROo2nWxiTlyQzEvn3KJPjPLiyGRUh/AKBvhBrmM
+cyqWPuQFDMeqUVQ0MhIWhyX2JTccM09IXbmV2WpAijw7encniBB8jy9eEzIkK6yE
+HrDBOAN5Cq5MPgcbEZLrnixSKjm6Ti6KUMjhrPfUgfkS64eHtvdtKGoj9nWFqBjA
+stmwZyCoYdLaeeuWsiT+m3lGJCH3YKM8dX723OIIUwlsld7kp9I5i6e153HZVow6
+Va+gCwKBgQDg7VUKVHOJdzTRPw+N8xTJEzDsdHu0FhvD5HSZK9vCx05GOo2hC0Qj
+1Zn0B+owIaMJmegcIFxoYxSasmZwkZYUyTNoXCa1hrS5D+C7IHZXYlCHhiws0T2v
+Ak9fSEKliTov82TzpRCXq6C1NpghF0AIal5cqyUoBNQXn69zMqsKqQKBgQDbicb1
+7DHCpxuC0/fmrlHiPunY4aYVz7CM6d6m0/Jpc1cBKqICkGuRBJlXhhXaCBt1I+vz
+DF5GEyPOo80rRYqmuhKAttGcp2uX1B0bSu0N4A3NyuMEOqG1Pw+wNo8SJK7b55B/
+3ZTzS+PL5FEztoGcw3nr8lseLuFVXkzimwRd+wKBgQCV844G4vew+iM2Kgpeog3b
+YiCdov0iuWfX7aMQx+fXL4mEJwkWB4LCjmair/F1a8IGZpq9auhFl2MRzERLC7lj
+MMzwPW8jryMmCpXSFaQ6QYsFBB1zNikfVt+U2tcZBibKokNNGLW6cmsjebrAuiqw
+RumTHMNwAzgPv8pMzHIHGwKBgQCSW9n5SCEsb2esjU/vHuFBf0aQlm65NSBd8T8Z
+4qGbojoAxxasYEe2AxDlBA6RWrz4wp1MsumEDMKJwojHg7HEfAxVzzZob50P4r4S
+MfNelV6JMeytfGvOKgp1ebS2wx89RQr/6Q33h+0ymDYieau916adTDy+yeuOPt3s
+Z1g+pwKBgQC5jdeQ5QVHWdpJnRSgiPsODpQ+AD+5H8ERTIWpb0e4uWfR76eOc2G3
+NLdXvpU31ady05mVS1dCZsZyEytOkG2+AgfyjJsIfwxw+5PZCoV3xm90tb/43l+U
+3YyKh/lmCfL8AM73ipXqSN1rQBf4gp4sXxJ0fbProsRQEIqveoyt8g==
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem
new file mode 100644
index 0000000000000..f1134bb17ed71
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-2048.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtBPRFC+8WpCauAyIr1uCTSK6qtAeevEW1vRkn/KkFQX27UWS
+NgU/IukTbA091BDae7HEiWSp7IA1IDbu2q4IwY9UksjF8yFVNZYifr/IzS6lbHOI
+ZRxuBzQOWgn0+7WNqzylXQ4y88yVVqSsdfiB8kJHi9o5r+M/3TBOrWCu75iYJeBV
+w0nhMYIYOxB0RkPqB1+5z4cgLjyZYuC6iZe+9m718J4LRHTd60lg9wtg4H7RUE3u
+VgjLSNpNyvVpOW2qHq+o21gdS7xBQ3pbD619vBWeNDkvCaBp6YZw4ENhUxeg4xaZ
+nOrNEKZw4HQnzklDJe1a69InQI6F2b/26VEGgQIDAQABAoIBABKTzMkBV7QcIOoF
+2QAGN74PbCR9Dffu8UVBtzPNC2Jj2CKIP9o01luaofdOsmczSebi4vytlt4gJ9rn
+7+I9fAfD6pyt+8XmVW0OzQY4cNXCDyzOCm8r7Knvk990EYL6KuBUhFbCRT1jiLCE
+koolFfrRHaJu4+6iSg9ekW9PfxyWfAxtEp4XrrqgN4jN3Lrx1rYCZnuYp3Lb+7WI
+fJC/rK6MTphUMLbPMvmUwHjFzoe7g9MZxRRY3kY3h1n3Ju1ZbaCbP0Vi/+tdgKAl
+290J2MStWWJfOoTNnnOSYhWIPQUiFtuUiab7tJ90GGb1DyLbOrr6wG2awJoqF9ZM
+Qwvkf/UCgYEA5dsHhxuX+cTHF6m+aGuyB0pF/cnFccTtwWz2WwiH6tldnOAIPfSi
+WJU33C988KwJFmAurcW43VVVs7fxYbC6ptQluEI+/L/3Mj/3WgwZaqX00cEPkzKA
+M1XbvanQAU0nGfq+ja7sZVpdbBoBUb6Bh7HFyLM3LgliT0kMQeolKXMCgYEAyI9W
+tEHnkHoPjAVSSobBOqCVpTp1WGb7XoxhahjjZcTOgxucna26mUyJeHQrOPp88PJo
+xxdDJU410p/tZARtFBoAa++IK9qC6QLjN23CuwhD7y6RNZsRZg0kOCg9SLj+zVj5
+mrvZFf6663EpL82UZ2zUGl4L1sMhYkia0TMjYzsCgYAFHuAIDoFQOyYETO/E+8E3
+kFwGz1vqsOxrBrZmSMZeYQFI4WTNnImRV6Gq8hPieLKrIPFpRaJcq+4A1vQ1rO47
+kTZV6IPmtZAYOnyUMPjP+2p80cQ7D0Dz49HFY+cSYFmipodgOKljiKPUKLAm1guk
+rj0tv3BXQjZCdeoj/cdeKQKBgF8u3+hWqs5/j2dVkzN5drUbR0oOT2iwHzZFC2pt
++2XuHFBOx2px6/AbSdbX0zeMccVsVlu+Z4iJ8LNQYTqpexciK/cNzCN75csuKqXA
+ur1G8+7Mu++j84LqU7kvJ76exZaxVmygICv3I8DfiLt+JqNbG+KTpay8GNjrOkZ0
+raPHAoGAQ1p/Qvp7DHP2qOnUB/rItEVgWECD3uPx4NOCq7Zcx7mb9p7CI6nePT5y
+heHpaJIqVyaS5/LHJDwvdB1nvtlgc9xKa5d1fWhLL3dwFCa98x5PDlN/JztH8DIt
+tTlD+8NECIvI+ytbzLS0PZWBYctAR2rP2qlMCGdYerdjwl8S98E=
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem
new file mode 100644
index 0000000000000..7fc26c3b2a535
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-3072.pem
@@ -0,0 +1,39 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIG4wIBAAKCAYEAv86yDTGazbq0uf6Xji4kdROCQaSmL+Ttse06BVXDiv7nf/13
+wJwt6tQzexw1GXQ5P2I0Mjh1NUr1tFXf3D99laJE209GEVWaQPEdoyRLdVibgH8I
+rtt8IhEIHMif8bBXONPwAy4M1HP0AWMIG0WV29ZTgZGmHknI+Rkm1P0bGhod6YBI
+sNNY59PjL0tWOf1M8Mj6OOcDS7wBgVMNZSA0MlkUf10Kes4fay/gqDrM2dEHi1lB
+7VNQ6nWbSNi5qTPMfhi0bkc64l2fXQ9qQ1BFaFR6AV/EQHQUhFv+AJ62d5vxssi3
+4DgLVz9XVaGXyNHahdxtLGdBzbbZ9OKPXnSGZdfhBxvr3iKj/MDBFq97mzjXisOu
+ZljFXx+I5VEiYjgdB2k+wFVOv9XNvwVQdO7p9lX9PJrV6pOKD5tJgh842j3134vA
+PaANDaZBbYj+8HN4oDU9Dtvi7INO7G2yXsCiC1xWwczHBhKv+M2cip4eL8XH0OZj
+hZi78mDSW1cY4x0xAgMBAAECggGALmijmh+jdh2ztsEMCIHPnmg+/wUIlNQOUxu0
+CzBqJMpyVvyMKhVf4s6/Og1kJ6mAZH6tZG1WprNhaeXRSWgvSbI+eNXgfTc6IHZ2
+lk+k1lq/HUMfZbeDfHakgNpmIMNrBzv4ebx9rDX2FxPQFVEr5kogYFxOVkvCDctx
+It2u3gztqd65N9ebTlRtRrcywMsx/5yRNo1mtb9imdjvh8VX+8qj9AEeFQKkhXnL
+IMlBYX57FdNd6T9cd65HCUQu/+qhJ7E/cRdoYTG0Hp9bc7pXM7b+6KqonUiMmi1k
+SzNlEuYU3SEFKen1K0b8Bk+d/abUaWsqPDPA4jBQye5AdCIMYMazT/KNBGZkNBPu
+abmlAut6MDVkwFsLDU5S4tpKqKUUeVmN7jte3BERICMQr3WD6cXd/AtdMrwWaBVm
+HTq9Lf7GFUxIDVU1pyGGHkfeXjoK+Ur5DIwJEr6BfhEMC6LGLU3UeWcZOKm8nkc4
+8ue73s+D+mcK4BiI0kRTwXejcnM1AoHBAN9VGvoyvR6s6J5lF59b+B/qQhU4MNfD
+vyJS7UxZke/JNWXPKlrij1bZgz2vgjsF+WaPYc1BVKjxRy1fciNlIqaEsjgnxm4M
+cHUD8uPeQi8ARPwm+kbRr/IVbD7pFIyKurYraNt3kQQq9aE9fOSOxjbnY+GhGbBM
+WnbZp0TH2oUbqHuKRmGUsFIS0tAhfObo4OebHOhiyIB+I7OZQKBU2qVIWyAinjFN
+PXiZ9ZT78v0YHZmI29ADEFw0cV1+R04P2wKBwQDb3Rrw+YXPifJDdeaupSno6xJ1
+ACWc0Oof7LVRZC8JJw+eMtgbx9VCcQs811QHJ+q9gXRR3JC38J1JCYJaMcCpG++H
+Spl3t2Rsd7dT3RxamYRnUtw6yFUTxYtEkbbViURxh5ghIMEuTyQ7vcV6SXNubGxp
+Pc/TQboUZHpehSnJhNiFXsCHgjd7eu/q6cxMqaaqULIie+sfbB39lnbuZ53f1pLZ
+7zb2E98m0DtjVstv+NYMoY7DG2DsJ8skw7QNiuMCgcEAhoYI/fRaHoQgimhyVjdb
+uj2tGIMESLNMCizRa0/4q+sTEwQ6iww1MydJ+nohg9QRakmrq6tSh4DuUtJPOirN
+OGhtwY2T5O3xP0rln4RdcVpEM253Cvl7deKZlTtoeU+HL/vt6WSYIV6PHlSfSj0G
+AERY0avsgVk8lKJ+Mtv/MHZ8gg3EXzrlCkr0WRIS3jQgZOH2A7Sc+WkBsEj7uJfk
+K/LtkOnJSAEyqdZzKw6oSvOfwL/DSATQcfnU33AVG1xJAoHAP7KYOpZgARe7G3ZO
+Be0N7lAkgccwHnWcTvF6OoFm6yTo7nFWkP3dOCmeEttjmcsjxBF8TLc1KkVInD9o
+B4+AfL3+MFkZi0iTjKVGdMKLk2gEpxanfVRK6baCubPpn4XsUWPyXC6sKSl+mtxJ
+GTuseKJq6jahPlg9e+j8VuQHjj2xqTohV2EPh/O/DHT93nwMMC2+3iS+otTk/3IX
+NMzKPW5iD0exyatKLGmJuycLO36BLRmlTbth4ilJPnaAdKf3AoHAANX+2JM3xxc0
+CchCT31ZEnkGRjDtQCZ+gR6BWeWF02HpEXk+scHthD5hUyBwrJkYMTT9wCLkerQl
+AioOGJCZkkio/5NDkPmJ80EW/4OcQc/p/uUY5H3PEsLAJBKbb3q30FC0KRlvhg3h
+HqyzAOuEjuBJcaCNsN/c+XYIQEbN4I51Zm04HVIDoCYeBs+XRd56foVOSUfW1Dm1
+jgbcxHBaZ1FYeBWoGutBj+BYGVNwD0XpltBVStlinaH48FWKCHiW
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem
new file mode 100644
index 0000000000000..a7153f21ee2ae
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-4096.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEAwvYN7bZNnIbifmo4WlYqQPXoCXjLRUspAX8jkViYJH4XIXD6
+lajlKOxvW27D9rIYjfNFShmfI8cf4IrXgrWUgV0TYBi8qm/Ppnr38R8wFy0z9fE6
+anF61vKdxjsW89NhT4QjvDNmOS6B6gK4JxSWo2MNpqxX79IOidc8T6mX8nxLYZek
+fb3qXeL8lGp46cwRnwmD0eua8r+9hHU/W4q3LBjVCEarK/8Haj+Mfccn97QAiaeh
+44/oALNRK4cUr2L3s0JuiXf3ihLi6yWXZ9LIklB8rvjWaK5IgYRlLE1Kwc/k1WCC
+H7IQVLRnZAZxhs/pXN+9XQYgnNDn9Bczj8UMNmzVs3wQQ+TMjpNSSEvUTIs6gfNH
+MwiPfbKXjAXKWSJBOfK77mtcUByT2iilW7lwUYRebl+Sh9jJSYzhOdiIrBgtUTtG
+C0bEhBHKNYt13C5NRLU2UpxxqDKdlV4iz7eDZONuasY2sU9krKNh631kyHe66EBx
+B2IIA48apavlyzHP6TyTw8PM00oL5UOmCsd1oZi1kaJEH0yLdWMvfU/d0mLMlTUF
+3bHsoe6zwn8ZhDQ4X781cxxt04j87bNNcDetJ2+4R/GsxyRozetJ3sf1DWYxeux0
++WbZwCbuFkJPqiLMcSNU7Mzzpqwsz6muRXexZrR0T+Mhfi4mSoKmhwgcRrkCAwEA
+AQKCAgEAitUHFCkG2Zkf/t+LI6FKU5oJU31mWwDrwXoVDPKm3Q3BUPoQJ7TvAxZj
+MhwNhkZKwALla6AVODgbVh3o29aMWxa5rmvPJhubJjVZDKal60swPko6zAPlct2w
+RrJbZOQ43pP9ko6Tk9KvhsHK+2Fo6A6ocPE3absOyU+xThU5895ZG7UN2ND97T+v
+l8y+L9c3ESaIbVs2qvipb4LCzGBakhdY1JzmVd01HY8Fb6f7qSPQYjYyTUdz8dO/
+0JxPuZljRI7hAhmg9/z9BHBvgeqTE8YnE751iHPbhAeQuFyOCLgPf3dLEmENyhPx
+sEP6YgL6bj2RroWngaQuGR7Qg+fRCkxBfBZGgkZd+WbN6Yv0EK3ktDkCcmi6urx3
+6kOb4BWyRbwZCUMF4Jzo0aQYZSZQuHcl6QZbX6AhCSzPWjMyb7Q5GApbwRNRBcjI
+iu8Ts+kXMiWtF7+Bd78JTnr6PPGMiPvjlZCGYTmvzq5/EJmIFvB8XlV3iskb29+w
+9Fa/In+/mXuFKPM6Vpc6+FxPzm1P2cP68/jqB3mg3unwx0ecXxjkHBJKcPwNCTaz
+nfPxjV+ep9VwyKViJ6SX3OPeMbsNQl6Zr4oCF+qvVZMGsrmd9fpE5NrKA7I+zYBE
+6pyGod7cZRiBU41uVO44cFiSTURUPHilwFaTVwb4p2BAHfVKWJUCggEBAPu57609
+SQZ8O4VpiHPDOa0inZJvzZ+0lAo6NOMyhTvY9CPn+YjfYNfol525W+T7JnrvWeic
+FIin9jHxnpXK0B/AftUJTw4rEpCio5f79xpFAES0vw+241+4XOI7c9LCOC+FWBiT
+4DVfYoob31vyK9+7pRSZ+SiMiu3vAY7seUSIAL0SvpvSbDyzZ+2eVZWBEgW6LVrz
+tDE8oNqQUMXA2/FchZmv9ysATBmsTJBo9POVkKNTyUMQzxtslvC6v4oNr7K3OIHb
+gcf4TzWXOzn9tG+4xYT9pWW5msGawiUZRcTaAna+E/OgpKWnEnZgCC4hboRLxPLX
+53FmOM6M2G7thzsCggEBAMZFZyj++yu5Efce9ccQX6LyfHrqXjNkrU+u9Ed1v5WV
+N9ACMmXrHJh5IYa5rhfMM4jWMFWyYbfsCzpfGn4S+5y/fzoFLoryPyH9ygDePA0Q
+sRF/PLveyOyVWvP0QtqMjivV40zHpySlUdTfH6DMpEJcutN4Zb28++HVcrwhoC1k
+6JAv6lMi0JVZPOQ9PdvWfqWiUFyWL4D2UzzGunCyDZRKhNFFZK8ZMwGrq1GwKbMY
+UjR1V446+XmVy1UmJBcsfv8N1GNAmS4JZnIYAA3F79gSaPMouBiGEoq9STZNMV8i
+yWX+q2+xF5on0HLiQQDF+CDf7TKQdn9nIb5T9UxH0psCggEBAIXdp5wVT1RnlK+q
+I6qMU4vDMNDDLDKWMXQO6Dk6kKf3BD5kCsvDHxky1A5ImJn5Bcmyp7mP50uVJtQA
+jjlSlcOM9uYMAUKjnUfVdfJJSGtr7mybQk/1Do3E2YBl5X5bUs3St7q6SS2ZACo6
+EFoxOyvL+kouZ9Ysh2VpCQccspDDUsE5yqvLB0xwjABNh23uKctp6tzHZTgZ6eDR
+hmLj4RNGBLZqYaM4kT/F5SGW70zqaPSyhgFoWvtWkB6M8XdVN+5uiVplhWr2ngj9
+171LMj2HoWLMEL08KxRyXHSmL/gzh8Pl6W5SK7z8UFErWK5PkRpBwQbz0lz345XU
+SPUU1CkCggEANl19Zd5UnK6IYIxatBSnRJTc0Q9GeS0X4IuGa7m7qJZb0Wtoyk1n
+b9928+wh8mCJmPGyGy1oYfgVNBIzv3IRD+/qQMx+8JZv/TALnaVSLfrvsm6DBriV
+u1lQsSe+1RJJQXfKdZkhGYlDIO11TLAQMiofCDExJI+XOVs/tJo7w3ax1idtRxJ6
+wxjO+35XYdo0q9IP2vNLXz8rn7SWFqSb2Jd+R6uRafms5L5foWx+Yp8+mye5tEQz
+d8U9FcH5j2kiAWmXXQeveNIXBx3RlfU6SLLzmAkIQaExoF06n+sWqJFvS8pC08Np
+R1m3bGjhjga6fLkp3v2BoHl05S08S4g+FQKCAQARIy6gUizXPEv8x1Pk70LzHPDQ
+y8Og3OozpYd1/AyY2TDPYdRhg4zk+R5JIYLQfsitdH5uzCO+UFwsKwhv76TIqN+D
+ht+vEoJ0UXrVG8MQe8VnV7+vdQtYrpYMi/SqnXFQuzaqW3LtWVEjXQFOosNGEtNV
+VupzeqsYa5GhhzagCTLMxkgAmyo7/GVa5h4dreiv3hq1xOwbmD7Y6J29NGJwlQPY
+7nw4rBtXxbMxThVgktLHuKLBapr2Gulg4tDTj4e1VoM8u9Y34imc5NevoQ32hfvr
+cZErq0WhWXvqvmbwRyyf1GRizFSdJ71TxT7EG71IKIMzcapod9UXtXXf/IWF
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem b/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem
new file mode 100644
index 0000000000000..250404e4b7617
--- /dev/null
+++ b/contrib/cryptomb/private_key_providers/test/test_data/rsa-512.pem
@@ -0,0 +1,9 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIBOgIBAAJBAJ5UTbiBg1SYFhLgXPawBf+joDBSVqsWQ4SS2lCt+pLSkit19LK9
+73f5ZYBhZFsszPzPn7bFEGVwsRiegtf5FccCAwEAAQJABykg6rtQyT6noOrsyWtk
+mg84a3cN3GcBXrmVjhiQQJubMkUNFE+l0JPNb8kzwzhcm8kzuO92CkqFSLXyQyDP
+IQIhAMxkU+gZ5HIStWr0yPMOVoikNjcTW9PGtcUu1zucodilAiEAxk6KGDnHr9Dq
+U150jT9aQTlZiZ6lF9kRU/gCcaIHPPsCIQC/6fRfGvDFq4tswittDSlzY70EOckf
+MJW8cB7oekn9gQIgGVRg2TVQJ0nlFF8FPiFwctJTeHuWFNS6HOKZ1U/f4s8CIBAA
+Kw3ZPC6zxfQNnkI+c96cbTQhQrVPGPlx5fC3l0Ru
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/exe/BUILD b/contrib/exe/BUILD
index b70a786989d7a..1cf08ac421632 100644
--- a/contrib/exe/BUILD
+++ b/contrib/exe/BUILD
@@ -6,6 +6,8 @@ load(
)
load(
"//contrib:all_contrib_extensions.bzl",
+ "ARM64_SKIP_CONTRIB_TARGETS",
+ "PPC_SKIP_CONTRIB_TARGETS",
"envoy_all_contrib_extensions",
)
@@ -37,5 +39,9 @@ envoy_cc_test(
},
deps = [
"//test/config_test:example_configs_test_lib",
- ] + envoy_all_contrib_extensions(),
+ ] + select({
+ "//bazel:linux_aarch64": envoy_all_contrib_extensions(ARM64_SKIP_CONTRIB_TARGETS),
+ "//bazel:linux_ppc": envoy_all_contrib_extensions(PPC_SKIP_CONTRIB_TARGETS),
+ "//conditions:default": envoy_all_contrib_extensions(),
+ }),
)
diff --git a/contrib/extensions_metadata.yaml b/contrib/extensions_metadata.yaml
index 8614d2dbddb83..215a7936f0604 100644
--- a/contrib/extensions_metadata.yaml
+++ b/contrib/extensions_metadata.yaml
@@ -33,4 +33,18 @@ envoy.filters.network.postgres_proxy:
- envoy.filters.network
security_posture: requires_trusted_downstream_and_upstream
status: stable
-
+envoy.filters.network.sip_proxy:
+ categories:
+ - envoy.filters.network
+ security_posture: requires_trusted_downstream_and_upstream
+ status: alpha
+envoy.filters.sip.router:
+ categories:
+ - envoy.sip_proxy.filters
+ security_posture: requires_trusted_downstream_and_upstream
+ status: alpha
+envoy.tls.key_providers.cryptomb:
+ categories:
+ - envoy.tls.key_providers
+ security_posture: robust_to_untrusted_downstream
+ status: alpha
diff --git a/contrib/kafka/filters/network/source/kafka_response.h b/contrib/kafka/filters/network/source/kafka_response.h
index 32bc8317f5131..f135f5cacb744 100644
--- a/contrib/kafka/filters/network/source/kafka_response.h
+++ b/contrib/kafka/filters/network/source/kafka_response.h
@@ -13,7 +13,7 @@ namespace Kafka {
* Decides if response with given api key & version should have tagged fields in header.
* Bear in mind, that ApiVersions responses DO NOT contain tagged fields in header (despite having
* flexible versions) as per
- * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24
+ * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24
* This method gets implemented in generated code through 'kafka_response_resolver_cc.j2'.
*
* @param api_key Kafka request key.
diff --git a/contrib/kafka/filters/network/source/kafka_types.h b/contrib/kafka/filters/network/source/kafka_types.h
index 3240b9a9c2d6c..d01c304984e4c 100644
--- a/contrib/kafka/filters/network/source/kafka_types.h
+++ b/contrib/kafka/filters/network/source/kafka_types.h
@@ -31,6 +31,20 @@ using NullableBytes = absl::optional;
*/
template using NullableArray = absl::optional>;
+/**
+ * Analogous to:
+ * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/Uuid.java#L28
+ */
+struct Uuid {
+
+ const int64_t msb_;
+ const int64_t lsb_;
+
+ Uuid(const int64_t msb, const int64_t lsb) : msb_{msb}, lsb_{lsb} {};
+
+ bool operator==(const Uuid& rhs) const { return msb_ == rhs.msb_ && lsb_ == rhs.lsb_; };
+};
+
} // namespace Kafka
} // namespace NetworkFilters
} // namespace Extensions
diff --git a/contrib/kafka/filters/network/source/mesh/BUILD b/contrib/kafka/filters/network/source/mesh/BUILD
index f457afee713ea..4d1481f89fcd8 100644
--- a/contrib/kafka/filters/network/source/mesh/BUILD
+++ b/contrib/kafka/filters/network/source/mesh/BUILD
@@ -109,6 +109,18 @@ envoy_cc_library(
],
)
+envoy_cc_library(
+ name = "outbound_record_lib",
+ srcs = [
+ ],
+ hdrs = [
+ "outbound_record.h",
+ ],
+ tags = ["skip_on_windows"],
+ deps = [
+ ],
+)
+
envoy_cc_library(
name = "upstream_kafka_client_lib",
srcs = [
@@ -118,6 +130,7 @@ envoy_cc_library(
],
tags = ["skip_on_windows"],
deps = [
+ ":outbound_record_lib",
],
)
@@ -131,6 +144,7 @@ envoy_cc_library(
],
tags = ["skip_on_windows"],
deps = [
+ ":outbound_record_lib",
":upstream_kafka_client_lib",
"//envoy/event:dispatcher_interface",
"//source/common/common:minimal_logger_lib",
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD b/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD
index 6891a3c3ea574..3a1d58d6320a6 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD
@@ -20,28 +20,16 @@ envoy_cc_library(
],
tags = ["skip_on_windows"],
deps = [
- ":produce_outbound_record_lib",
":produce_record_extractor_lib",
"//contrib/kafka/filters/network/source:kafka_request_parser_lib",
"//contrib/kafka/filters/network/source:kafka_response_parser_lib",
"//contrib/kafka/filters/network/source/mesh:abstract_command_lib",
+ "//contrib/kafka/filters/network/source/mesh:outbound_record_lib",
"//contrib/kafka/filters/network/source/mesh:upstream_kafka_facade_lib",
"//source/common/common:minimal_logger_lib",
],
)
-envoy_cc_library(
- name = "produce_outbound_record_lib",
- srcs = [
- ],
- hdrs = [
- "produce_outbound_record.h",
- ],
- tags = ["skip_on_windows"],
- deps = [
- ],
-)
-
envoy_cc_library(
name = "produce_record_extractor_lib",
srcs = [
@@ -52,8 +40,8 @@ envoy_cc_library(
],
tags = ["skip_on_windows"],
deps = [
- ":produce_outbound_record_lib",
"//contrib/kafka/filters/network/source:kafka_request_parser_lib",
+ "//contrib/kafka/filters/network/source/mesh:outbound_record_lib",
],
)
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc
index 1fa8cfa8f5b82..31cb53f12a402 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc
@@ -38,10 +38,10 @@ AbstractResponseSharedPtr ApiVersionsRequestHolder::computeAnswer() const {
request_header_.correlation_id_};
const int16_t error_code = 0;
- const ApiVersionsResponseKey produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED,
- MAX_PRODUCE_SUPPORTED};
- const ApiVersionsResponseKey metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED,
- MAX_METADATA_SUPPORTED};
+ const ApiVersion produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED,
+ MAX_PRODUCE_SUPPORTED};
+ const ApiVersion metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED,
+ MAX_METADATA_SUPPORTED};
const ApiVersionsResponse real_response = {error_code, {produce_entry, metadata_entry}};
return std::make_shared>(metadata, real_response);
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc
index 05b63b451d1fa..07f402a80802e 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc
@@ -33,8 +33,14 @@ AbstractResponseSharedPtr MetadataRequestHolder::computeAnswer() const {
advertised_address.second};
std::vector response_topics;
if (request_->data_.topics_) {
- for (const auto& topic : *(request_->data_.topics_)) {
- const std::string& topic_name = topic.name_;
+ for (const MetadataRequestTopic& topic : *(request_->data_.topics_)) {
+ if (!topic.name_) {
+ // The client sent request without topic name (UUID was sent instead).
+ // We do not know how to handle it, so do not send any metadata.
+ // This will cause failures in clients downstream.
+ continue;
+ }
+ const std::string& topic_name = *(topic.name_);
std::vector topic_partitions;
const absl::optional cluster_config =
configuration_.computeClusterConfigForTopic(topic_name);
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc
index e2ed06fdbb17e..b94b1257c687d 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc
@@ -20,19 +20,18 @@ ProduceRequestHolder::ProduceRequestHolder(AbstractRequestListener& filter,
const RecordExtractor& record_extractor,
const std::shared_ptr> request)
: BaseInFlightRequest{filter}, kafka_facade_{kafka_facade}, request_{request} {
- outbound_records_ = record_extractor.extractRecords(request_->data_.topics_);
+ outbound_records_ = record_extractor.extractRecords(request_->data_.topic_data_);
expected_responses_ = outbound_records_.size();
}
void ProduceRequestHolder::startProcessing() {
// Main part of the proxy: for each outbound record we get the appropriate sink (effectively a
// facade for upstream Kafka cluster), and send the record to it.
- for (const auto& outbound_record : outbound_records_) {
+ for (const OutboundRecord& outbound_record : outbound_records_) {
KafkaProducer& producer = kafka_facade_.getProducerForTopic(outbound_record.topic_);
// We need to provide our object as first argument, as we will want to be notified when the
// delivery finishes.
- producer.send(shared_from_this(), outbound_record.topic_, outbound_record.partition_,
- outbound_record.key_, outbound_record.value_);
+ producer.send(shared_from_this(), outbound_record);
}
// Corner case handling:
// If we ever receive produce request without records, we need to notify the filter we are ready,
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h
index 04781366ea90f..d277b6c4bde7b 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h
@@ -2,8 +2,8 @@
#include "contrib/kafka/filters/network/source/external/requests.h"
#include "contrib/kafka/filters/network/source/mesh/abstract_command.h"
-#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h"
#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h"
+#include "contrib/kafka/filters/network/source/mesh/outbound_record.h"
#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h"
#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h"
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc
index 3c98dc4885cf9..f7330021fd164 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc
@@ -10,11 +10,11 @@ std::vector
RecordExtractorImpl::extractRecords(const std::vector& data) const {
std::vector result;
for (const auto& topic_data : data) {
- for (const auto& partition_data : topic_data.partitions_) {
+ for (const auto& partition_data : topic_data.partition_data_) {
// Kafka protocol allows nullable data.
if (partition_data.records_) {
- const auto topic_result = extractPartitionRecords(
- topic_data.name_, partition_data.partition_index_, *(partition_data.records_));
+ const auto topic_result = extractPartitionRecords(topic_data.name_, partition_data.index_,
+ *(partition_data.records_));
std::copy(topic_result.begin(), topic_result.end(), std::back_inserter(result));
}
}
@@ -152,15 +152,17 @@ OutboundRecord RecordExtractorImpl::extractRecord(const std::string& topic, cons
throw EnvoyException(fmt::format("invalid header count in record for [{}-{}]: {}", topic,
partition, headers_count));
}
+ std::vector headers;
+ headers.reserve(headers_count);
for (int32_t i = 0; i < headers_count; ++i) {
- // For now, we ignore headers.
- extractByteArray(data); // Header key.
- extractByteArray(data); // Header value.
+ const absl::string_view header_key = extractByteArray(data);
+ const absl::string_view header_value = extractByteArray(data);
+ headers.emplace_back(header_key, header_value);
}
if (data == expected_end_of_record) {
// We have consumed everything nicely.
- return OutboundRecord{topic, partition, key, value};
+ return OutboundRecord{topic, partition, key, value, headers};
} else {
// Bad data - there are bytes left.
throw EnvoyException(fmt::format("data left after consuming record for [{}-{}]: {}", topic,
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h
index 59c6e7380e4fa..f17b79dd54dd9 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h
+++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h
@@ -1,7 +1,7 @@
#pragma once
#include "contrib/kafka/filters/network/source/external/requests.h"
-#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h"
+#include "contrib/kafka/filters/network/source/mesh/outbound_record.h"
namespace Envoy {
namespace Extensions {
diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h b/contrib/kafka/filters/network/source/mesh/outbound_record.h
similarity index 73%
rename from contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h
rename to contrib/kafka/filters/network/source/mesh/outbound_record.h
index 4174e3dea7e13..a56baa5d2fb5e 100644
--- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h
+++ b/contrib/kafka/filters/network/source/mesh/outbound_record.h
@@ -1,6 +1,8 @@
#pragma once
#include
+#include
+#include
#include "absl/strings/string_view.h"
@@ -10,6 +12,9 @@ namespace NetworkFilters {
namespace Kafka {
namespace Mesh {
+// Kafka header.
+using Header = std::pair;
+
// Binds a single inbound record from Kafka client with its delivery information.
struct OutboundRecord {
@@ -18,15 +23,16 @@ struct OutboundRecord {
const int32_t partition_;
const absl::string_view key_;
const absl::string_view value_;
+ const std::vector headers_;
// These fields will get updated when delivery to upstream Kafka cluster finishes.
int16_t error_code_;
uint32_t saved_offset_;
OutboundRecord(const std::string& topic, const int32_t partition, const absl::string_view key,
- const absl::string_view value)
- : topic_{topic}, partition_{partition}, key_{key}, value_{value}, error_code_{0},
- saved_offset_{0} {};
+ const absl::string_view value, const std::vector& headers)
+ : topic_{topic}, partition_{partition}, key_{key}, value_{value}, headers_{headers},
+ error_code_{0}, saved_offset_{0} {};
};
} // namespace Mesh
diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h
index 24e9b36efdc65..f034f7da4f421 100644
--- a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h
+++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h
@@ -2,10 +2,12 @@
#include